dmu: rename dmu_tx_assign flags

The TXG_* flag names clash with those used by txg_wait_synced_tx(), but the
two sets of flags aren't directly compatible, which leads to confusion.
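
At every call site the change is a mechanical rename of the flag; for
example, a typical caller goes from

    error = dmu_tx_assign(tx, TXG_WAIT);

to

    error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);

Combinations such as TXG_NOWAIT | TXG_NOTHROTTLE become
DMU_TX_ASSIGN_NOWAIT | DMU_TX_ASSIGN_NOTHROTTLE in the same way.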

Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
(cherry picked from commit 1f0fb1dae7c1e84de3b39e669e09b8b3d5b80b87)
Authored by Rob Norris, 2023-04-10 09:34:22 +10:00; committed by Geoff Amey
parent b0d75996ba
commit 48a48059c7
35 changed files with 200 additions and 175 deletions

View File

@@ -1745,10 +1745,11 @@ ztest_zd_fini(ztest_ds_t *zd)
 		ztest_rll_destroy(&zd->zd_range_lock[l]);
 }
 
-#define	TXG_MIGHTWAIT (ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)
+#define	DMU_TX_ASSIGN_MIGHTWAIT \
+	(ztest_random(10) == 0 ? DMU_TX_ASSIGN_NOWAIT : DMU_TX_ASSIGN_WAIT)
 
 static uint64_t
-ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag)
+ztest_tx_assign(dmu_tx_t *tx, dmu_tx_assign_flag_t flags, const char *tag)
 {
 	uint64_t txg;
 	int error;
@@ -1756,10 +1757,10 @@ ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag)
 	/*
 	 * Attempt to assign tx to some transaction group.
 	 */
-	error = dmu_tx_assign(tx, txg_how);
+	error = dmu_tx_assign(tx, flags);
 	if (error) {
 		if (error == ERESTART) {
-			ASSERT3U(txg_how, ==, TXG_NOWAIT);
+			ASSERT3U(flags, ==, DMU_TX_ASSIGN_NOWAIT);
 			dmu_tx_wait(tx);
 		} else {
 			ASSERT3U(error, ==, ENOSPC);
@ -2005,7 +2006,7 @@ ztest_replay_create(void *arg1, void *arg2, boolean_t byteswap)
dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT); dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
} }
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); txg = ztest_tx_assign(tx, DMU_TX_ASSIGN_WAIT, FTAG);
if (txg == 0) if (txg == 0)
return (ENOSPC); return (ENOSPC);
@ -2095,7 +2096,7 @@ ztest_replay_remove(void *arg1, void *arg2, boolean_t byteswap)
dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name); dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name);
dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END); dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); txg = ztest_tx_assign(tx, DMU_TX_ASSIGN_WAIT, FTAG);
if (txg == 0) { if (txg == 0) {
ztest_object_unlock(zd, object); ztest_object_unlock(zd, object);
return (ENOSPC); return (ENOSPC);
@ -2177,7 +2178,7 @@ ztest_replay_write(void *arg1, void *arg2, boolean_t byteswap)
P2PHASE(offset, length) == 0) P2PHASE(offset, length) == 0)
abuf = dmu_request_arcbuf(db, length); abuf = dmu_request_arcbuf(db, length);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); txg = ztest_tx_assign(tx, DMU_TX_ASSIGN_WAIT, FTAG);
if (txg == 0) { if (txg == 0) {
if (abuf != NULL) if (abuf != NULL)
dmu_return_arcbuf(abuf); dmu_return_arcbuf(abuf);
@ -2267,7 +2268,7 @@ ztest_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length); dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); txg = ztest_tx_assign(tx, DMU_TX_ASSIGN_WAIT, FTAG);
if (txg == 0) { if (txg == 0) {
ztest_range_unlock(rl); ztest_range_unlock(rl);
ztest_object_unlock(zd, lr->lr_foid); ztest_object_unlock(zd, lr->lr_foid);
@ -2308,7 +2309,7 @@ ztest_replay_setattr(void *arg1, void *arg2, boolean_t byteswap)
tx = dmu_tx_create(os); tx = dmu_tx_create(os);
dmu_tx_hold_bonus(tx, lr->lr_foid); dmu_tx_hold_bonus(tx, lr->lr_foid);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); txg = ztest_tx_assign(tx, DMU_TX_ASSIGN_WAIT, FTAG);
if (txg == 0) { if (txg == 0) {
dmu_buf_rele(db, FTAG); dmu_buf_rele(db, FTAG);
ztest_object_unlock(zd, lr->lr_foid); ztest_object_unlock(zd, lr->lr_foid);
@ -2722,7 +2723,7 @@ ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
dmu_tx_hold_write(tx, object, offset, size); dmu_tx_hold_write(tx, object, offset, size);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG); txg = ztest_tx_assign(tx, DMU_TX_ASSIGN_WAIT, FTAG);
if (txg != 0) { if (txg != 0) {
dmu_prealloc(os, object, offset, size, tx); dmu_prealloc(os, object, offset, size, tx);
@ -4770,7 +4771,7 @@ ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
/* This accounts for setting the checksum/compression. */ /* This accounts for setting the checksum/compression. */
dmu_tx_hold_bonus(tx, bigobj); dmu_tx_hold_bonus(tx, bigobj);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); txg = ztest_tx_assign(tx, DMU_TX_ASSIGN_MIGHTWAIT, FTAG);
if (txg == 0) { if (txg == 0) {
umem_free(packbuf, packsize); umem_free(packbuf, packsize);
umem_free(bigbuf, bigsize); umem_free(bigbuf, bigsize);
@ -5060,7 +5061,7 @@ ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
dmu_tx_hold_write(tx, packobj, packoff, packsize); dmu_tx_hold_write(tx, packobj, packoff, packsize);
dmu_tx_hold_write(tx, bigobj, bigoff, bigsize); dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); txg = ztest_tx_assign(tx, DMU_TX_ASSIGN_MIGHTWAIT, FTAG);
if (txg == 0) { if (txg == 0) {
umem_free(packbuf, packsize); umem_free(packbuf, packsize);
umem_free(bigbuf, bigsize); umem_free(bigbuf, bigsize);
@ -5281,7 +5282,7 @@ ztest_zap(ztest_ds_t *zd, uint64_t id)
*/ */
tx = dmu_tx_create(os); tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, NULL); dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); txg = ztest_tx_assign(tx, DMU_TX_ASSIGN_MIGHTWAIT, FTAG);
if (txg == 0) if (txg == 0)
goto out; goto out;
for (i = 0; i < 2; i++) { for (i = 0; i < 2; i++) {
@ -5349,7 +5350,7 @@ ztest_zap(ztest_ds_t *zd, uint64_t id)
*/ */
tx = dmu_tx_create(os); tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, NULL); dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); txg = ztest_tx_assign(tx, DMU_TX_ASSIGN_MIGHTWAIT, FTAG);
if (txg == 0) if (txg == 0)
goto out; goto out;
@ -5382,7 +5383,7 @@ ztest_zap(ztest_ds_t *zd, uint64_t id)
tx = dmu_tx_create(os); tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, NULL); dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); txg = ztest_tx_assign(tx, DMU_TX_ASSIGN_MIGHTWAIT, FTAG);
if (txg == 0) if (txg == 0)
goto out; goto out;
VERIFY0(zap_remove(os, object, txgname, tx)); VERIFY0(zap_remove(os, object, txgname, tx));
@ -5427,7 +5428,7 @@ ztest_fzap(ztest_ds_t *zd, uint64_t id)
tx = dmu_tx_create(os); tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, name); dmu_tx_hold_zap(tx, object, B_TRUE, name);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); txg = ztest_tx_assign(tx, DMU_TX_ASSIGN_MIGHTWAIT, FTAG);
if (txg == 0) if (txg == 0)
goto out; goto out;
error = zap_add(os, object, name, sizeof (uint64_t), 1, error = zap_add(os, object, name, sizeof (uint64_t), 1,
@ -5498,7 +5499,7 @@ ztest_zap_parallel(ztest_ds_t *zd, uint64_t id)
if (i >= 2) { if (i >= 2) {
tx = dmu_tx_create(os); tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, NULL); dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG); txg = ztest_tx_assign(tx, DMU_TX_ASSIGN_MIGHTWAIT, FTAG);
if (txg == 0) { if (txg == 0) {
umem_free(od, sizeof (ztest_od_t)); umem_free(od, sizeof (ztest_od_t));
return; return;
@ -5663,7 +5664,7 @@ ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
error = -1; error = -1;
if (!error) if (!error)
error = dmu_tx_assign(tx, TXG_NOWAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_NOWAIT);
txg = error ? 0 : dmu_tx_get_txg(tx); txg = error ? 0 : dmu_tx_get_txg(tx);

View File

@@ -269,14 +269,16 @@ typedef enum dmu_object_type {
 } dmu_object_type_t;
 
 /*
- * These flags are intended to be used to specify the "txg_how"
+ * These flags are intended to be used to specify the "flags"
  * parameter when calling the dmu_tx_assign() function. See the comment
  * above dmu_tx_assign() for more details on the meaning of these flags.
  */
-#define	TXG_NOWAIT	(0ULL)
-#define	TXG_WAIT	(1ULL<<0)
-#define	TXG_NOTHROTTLE	(1ULL<<1)
-#define	TXG_NOSUSPEND	(1ULL<<2)
+typedef enum {
+	DMU_TX_ASSIGN_NOWAIT = 0,
+	DMU_TX_ASSIGN_WAIT = (1U << 0),
+	DMU_TX_ASSIGN_NOTHROTTLE = (1U << 1),
+	DMU_TX_ASSIGN_NOSUSPEND = (1U << 2),
+} dmu_tx_assign_flag_t;
 
 void byteswap_uint64_array(void *buf, size_t size);
 void byteswap_uint32_array(void *buf, size_t size);
@@ -785,7 +787,7 @@ void dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object);
 void dmu_tx_hold_sa(dmu_tx_t *tx, struct sa_handle *hdl, boolean_t may_grow);
 void dmu_tx_hold_sa_create(dmu_tx_t *tx, int total_size);
 void dmu_tx_abort(dmu_tx_t *tx);
-int dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how);
+int dmu_tx_assign(dmu_tx_t *tx, dmu_tx_assign_flag_t flags);
 void dmu_tx_wait(dmu_tx_t *tx);
 void dmu_tx_commit(dmu_tx_t *tx);
 void dmu_tx_mark_netfree(dmu_tx_t *tx);
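
Since the enum values keep the same bit assignments as the old TXG_* macros,
callers can still OR them together. Two combinations that appear later in
this diff:

	/* don't block, and skip the write-throttle delay (used on retry paths) */
	error = dmu_tx_assign(tx, DMU_TX_ASSIGN_NOWAIT | DMU_TX_ASSIGN_NOTHROTTLE);

	/* block for an open txg, but return EAGAIN if the pool suspends */
	error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT | DMU_TX_ASSIGN_NOSUSPEND);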

View File

@ -139,7 +139,7 @@ extern dmu_tx_stats_t dmu_tx_stats;
* These routines are defined in dmu.h, and are called by the user. * These routines are defined in dmu.h, and are called by the user.
*/ */
dmu_tx_t *dmu_tx_create(objset_t *dd); dmu_tx_t *dmu_tx_create(objset_t *dd);
int dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how); int dmu_tx_assign(dmu_tx_t *tx, dmu_tx_assign_flag_t flags);
void dmu_tx_commit(dmu_tx_t *tx); void dmu_tx_commit(dmu_tx_t *tx);
void dmu_tx_abort(dmu_tx_t *tx); void dmu_tx_abort(dmu_tx_t *tx);
uint64_t dmu_tx_get_txg(dmu_tx_t *tx); uint64_t dmu_tx_get_txg(dmu_tx_t *tx);

View File

@ -1998,7 +1998,7 @@ top:
} }
zfs_sa_upgrade_txholds(tx, zp); zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_NOWAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_NOWAIT);
if (error) { if (error) {
mutex_exit(&zp->z_acl_lock); mutex_exit(&zp->z_acl_lock);

View File

@ -336,7 +336,7 @@ zfs_unlinked_drain(zfsvfs_t *zfsvfs)
if (zp->z_links != 0) { if (zp->z_links != 0) {
tx = dmu_tx_create(zfsvfs->z_os); tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error != 0) { if (error != 0) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
vput(ZTOV(zp)); vput(ZTOV(zp));
@ -398,7 +398,7 @@ zfs_purgedir(znode_t *dzp)
/* Is this really needed ? */ /* Is this really needed ? */
zfs_sa_upgrade_txholds(tx, xzp); zfs_sa_upgrade_txholds(tx, xzp);
dmu_tx_mark_netfree(tx); dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
vput(ZTOV(xzp)); vput(ZTOV(xzp));
@ -494,7 +494,7 @@ zfs_rmnode(znode_t *zp)
dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END); dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
zfs_sa_upgrade_txholds(tx, zp); zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
/* /*
* Not enough space to delete the file. Leave it in the * Not enough space to delete the file. Leave it in the
@ -827,7 +827,7 @@ zfs_make_xattrdir(znode_t *zp, vattr_t *vap, znode_t **xvpp, cred_t *cr)
fuid_dirtied = zfsvfs->z_fuid_dirty; fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied) if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx); zfs_fuid_txhold(zfsvfs, tx);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
zfs_acl_ids_free(&acl_ids); zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx); dmu_tx_abort(tx);

View File

@ -2159,7 +2159,7 @@ zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
ZFS_SA_ATTRS); ZFS_SA_ATTRS);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL); dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
} }
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
return (error); return (error);

View File

@@ -153,14 +153,14 @@ typedef ulong_t cookie_t;
  * (3) All range locks must be grabbed before calling dmu_tx_assign(),
  *	as they can span dmu_tx_assign() calls.
  *
- * (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
- *	dmu_tx_assign(). This is critical because we don't want to block
+ * (4) If ZPL locks are held, pass DMU_TX_ASSIGN_NOWAIT as the second argument
+ *	to dmu_tx_assign(). This is critical because we don't want to block
  *	while holding locks.
  *
- *	If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT. This
- *	reduces lock contention and CPU usage when we must wait (note that if
- *	throughput is constrained by the storage, nearly every transaction
- *	must wait).
+ *	If no ZPL locks are held (aside from ZFS_ENTER()), use
+ *	DMU_TX_ASSIGN_WAIT. This reduces lock contention and CPU usage when we
+ *	must wait (note that if throughput is constrained by the storage, nearly
+ *	every transaction must wait).
  *
  * Note, in particular, that if a lock is sometimes acquired before
  *	the tx assigns, and sometimes after (e.g. z_lock), then failing
@@ -168,15 +168,16 @@ typedef ulong_t cookie_t;
  *
  * Thread A has grabbed a lock before calling dmu_tx_assign().
  * Thread B is in an already-assigned tx, and blocks for this lock.
- * Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
- * forever, because the previous txg can't quiesce until B's tx commits.
+ * Thread A calls dmu_tx_assign(DMU_TX_ASSIGN_WAIT) and blocks in
+ * txg_wait_open() forever, because the previous txg can't quiesce until
+ * B's tx commits.
  *
- * If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
- * then drop all locks, call dmu_tx_wait(), and try again. On subsequent
- * calls to dmu_tx_assign(), pass TXG_NOTHROTTLE in addition to TXG_NOWAIT,
- * to indicate that this operation has already called dmu_tx_wait().
- * This will ensure that we don't retry forever, waiting a short bit
- * each time.
+ * If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is
+ * DMU_TX_ASSIGN_NOWAIT, then drop all locks, call dmu_tx_wait(), and try
+ * again. On subsequent calls to dmu_tx_assign(), pass
+ * DMU_TX_ASSIGN_NOTHROTTLE in addition to DMU_TX_ASSIGN_NOWAIT, to
+ * indicate that this operation has already called dmu_tx_wait(). This
+ * will ensure that we don't retry forever, waiting a short bit each time.
  *
  * (5) If the operation succeeded, generate the intent log entry for it
  *	before dropping locks. This ensures that the ordering of events
@@ -198,7 +199,8 @@ typedef ulong_t cookie_t;
  *	rw_enter(...);			// grab any other locks you need
  *	tx = dmu_tx_create(...);	// get DMU tx
  *	dmu_tx_hold_*();		// hold each object you might modify
- *	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
+ *	error = dmu_tx_assign(tx,
+ *	    (waited ? DMU_TX_ASSIGN_NOTHROTTLE : 0) | DMU_TX_ASSIGN_NOWAIT);
  *	if (error) {
  *		rw_exit(...);		// drop locks
  *		zfs_dirent_unlock(dl);	// unlock directory entry
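
For reference, the retry pattern this comment describes looks roughly like
the following under the new flag names. This is a generic sketch rather than
any one call site; the locking calls and the top: label are placeholders.

	boolean_t waited = B_FALSE;
top:
	/* ... grab ZPL locks, dmu_tx_create(), dmu_tx_hold_*() ... */
	error = dmu_tx_assign(tx,
	    (waited ? DMU_TX_ASSIGN_NOTHROTTLE : 0) | DMU_TX_ASSIGN_NOWAIT);
	if (error) {
		/* drop the locks before waiting */
		if (error == ERESTART) {
			waited = B_TRUE;
			dmu_tx_wait(tx);	/* wait with no locks held */
			dmu_tx_abort(tx);
			goto top;		/* retry; NOTHROTTLE now set */
		}
		dmu_tx_abort(tx);
		return (error);
	}
	/* ... do the work, log it, dmu_tx_commit(tx), then drop locks ... */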
@ -1162,7 +1164,7 @@ zfs_create(znode_t *dzp, const char *name, vattr_t *vap, int excl, int mode,
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, acl_ids.z_aclp->z_acl_bytes); 0, acl_ids.z_aclp->z_acl_bytes);
} }
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
zfs_acl_ids_free(&acl_ids); zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx); dmu_tx_abort(tx);
@ -1287,7 +1289,7 @@ zfs_remove_(vnode_t *dvp, vnode_t *vp, const char *name, cred_t *cr)
*/ */
dmu_tx_mark_netfree(tx); dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
ZFS_EXIT(zfsvfs); ZFS_EXIT(zfsvfs);
@ -1502,7 +1504,7 @@ zfs_mkdir(znode_t *dzp, const char *dirname, vattr_t *vap, znode_t **zpp,
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes + dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE); ZFS_SA_BASE_ATTR_SIZE);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
zfs_acl_ids_free(&acl_ids); zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx); dmu_tx_abort(tx);
@ -1605,7 +1607,7 @@ zfs_rmdir_(vnode_t *dvp, vnode_t *vp, const char *name, cred_t *cr)
zfs_sa_upgrade_txholds(tx, zp); zfs_sa_upgrade_txholds(tx, zp);
zfs_sa_upgrade_txholds(tx, dzp); zfs_sa_upgrade_txholds(tx, dzp);
dmu_tx_mark_netfree(tx); dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
ZFS_EXIT(zfsvfs); ZFS_EXIT(zfsvfs);
@ -2694,7 +2696,7 @@ zfs_setattr(znode_t *zp, vattr_t *vap, int flags, cred_t *cr)
zfs_sa_upgrade_txholds(tx, zp); zfs_sa_upgrade_txholds(tx, zp);
err = dmu_tx_assign(tx, TXG_WAIT); err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (err) if (err)
goto out; goto out;
@ -3396,7 +3398,7 @@ zfs_do_rename_impl(vnode_t *sdvp, vnode_t **svpp, struct componentname *scnp,
zfs_sa_upgrade_txholds(tx, szp); zfs_sa_upgrade_txholds(tx, szp);
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
goto out_seq; goto out_seq;
@ -3592,7 +3594,7 @@ zfs_symlink(znode_t *dzp, const char *name, vattr_t *vap,
} }
if (fuid_dirtied) if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx); zfs_fuid_txhold(zfsvfs, tx);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
zfs_acl_ids_free(&acl_ids); zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx); dmu_tx_abort(tx);
@ -3797,7 +3799,7 @@ zfs_link(znode_t *tdzp, znode_t *szp, const char *name, cred_t *cr,
dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, name); dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, name);
zfs_sa_upgrade_txholds(tx, szp); zfs_sa_upgrade_txholds(tx, szp);
zfs_sa_upgrade_txholds(tx, tdzp); zfs_sa_upgrade_txholds(tx, tdzp);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
ZFS_EXIT(zfsvfs); ZFS_EXIT(zfsvfs);
@ -3926,7 +3928,7 @@ zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp); zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
} else { } else {
@ -4255,7 +4257,7 @@ zfs_putpages(struct vnode *vp, vm_page_t *ma, size_t len, int flags,
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp); zfs_sa_upgrade_txholds(tx, zp);
err = dmu_tx_assign(tx, TXG_WAIT); err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (err != 0) { if (err != 0) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
goto out; goto out;

View File

@ -1428,7 +1428,7 @@ zfs_extend(znode_t *zp, uint64_t end)
newblksz = 0; newblksz = 0;
} }
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
zfs_rangelock_exit(lr); zfs_rangelock_exit(lr);
@ -1546,7 +1546,7 @@ zfs_trunc(znode_t *zp, uint64_t end)
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp); zfs_sa_upgrade_txholds(tx, zp);
dmu_tx_mark_netfree(tx); dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
zfs_rangelock_exit(lr); zfs_rangelock_exit(lr);
@ -1627,7 +1627,7 @@ log:
tx = dmu_tx_create(zfsvfs->z_os); tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp); zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
return (error); return (error);

View File

@ -677,7 +677,7 @@ zvol_geom_bio_strategy(struct bio *bp)
if (bp->bio_cmd == BIO_DELETE) { if (bp->bio_cmd == BIO_DELETE) {
dmu_tx_t *tx = dmu_tx_create(zv->zv_objset); dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error != 0) { if (error != 0) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
} else { } else {
@ -697,7 +697,7 @@ zvol_geom_bio_strategy(struct bio *bp)
} else { } else {
dmu_tx_t *tx = dmu_tx_create(os); dmu_tx_t *tx = dmu_tx_create(os);
dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, size); dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, size);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
} else { } else {
@ -842,7 +842,7 @@ zvol_cdev_write(struct cdev *dev, struct uio *uio_s, int ioflag)
bytes = volsize - off; bytes = volsize - off;
dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, bytes); dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, bytes);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
break; break;
@ -1104,7 +1104,7 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
lr = zfs_rangelock_enter(&zv->zv_rangelock, offset, length, lr = zfs_rangelock_enter(&zv->zv_rangelock, offset, length,
RL_WRITER); RL_WRITER);
dmu_tx_t *tx = dmu_tx_create(zv->zv_objset); dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error != 0) { if (error != 0) {
sync = FALSE; sync = FALSE;
dmu_tx_abort(tx); dmu_tx_abort(tx);

View File

@ -2164,7 +2164,7 @@ top:
} }
zfs_sa_upgrade_txholds(tx, zp); zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_NOWAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_NOWAIT);
if (error) { if (error) {
mutex_exit(&zp->z_acl_lock); mutex_exit(&zp->z_acl_lock);
mutex_exit(&zp->z_lock); mutex_exit(&zp->z_lock);

View File

@ -617,7 +617,7 @@ zfs_purgedir(znode_t *dzp)
/* Is this really needed ? */ /* Is this really needed ? */
zfs_sa_upgrade_txholds(tx, xzp); zfs_sa_upgrade_txholds(tx, xzp);
dmu_tx_mark_netfree(tx); dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
zfs_zrele_async(xzp); zfs_zrele_async(xzp);
@ -717,7 +717,7 @@ zfs_rmnode(znode_t *zp)
dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END); dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
zfs_sa_upgrade_txholds(tx, zp); zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
/* /*
* Not enough space to delete the file. Leave it in the * Not enough space to delete the file. Leave it in the
@ -1084,7 +1084,7 @@ zfs_make_xattrdir(znode_t *zp, vattr_t *vap, znode_t **xzpp, cred_t *cr)
fuid_dirtied = zfsvfs->z_fuid_dirty; fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied) if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx); zfs_fuid_txhold(zfsvfs, tx);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
zfs_acl_ids_free(&acl_ids); zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx); dmu_tx_abort(tx);

View File

@ -2006,7 +2006,7 @@ zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
ZFS_SA_ATTRS); ZFS_SA_ATTRS);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL); dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
} }
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
return (error); return (error);

View File

@@ -103,14 +103,14 @@
  * (3) All range locks must be grabbed before calling dmu_tx_assign(),
  *	as they can span dmu_tx_assign() calls.
  *
- * (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
- *	dmu_tx_assign(). This is critical because we don't want to block
+ * (4) If ZPL locks are held, pass DMU_TX_ASSIGN_NOWAIT as the second argument
+ *	to dmu_tx_assign(). This is critical because we don't want to block
  *	while holding locks.
  *
- *	If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT. This
- *	reduces lock contention and CPU usage when we must wait (note that if
- *	throughput is constrained by the storage, nearly every transaction
- *	must wait).
+ *	If no ZPL locks are held (aside from ZFS_ENTER()), use
+ *	DMU_TX_ASSIGN_WAIT. This reduces lock contention and CPU usage when we
+ *	must wait (note that if throughput is constrained by the storage, nearly
+ *	every transaction must wait).
  *
  * Note, in particular, that if a lock is sometimes acquired before
  *	the tx assigns, and sometimes after (e.g. z_lock), then failing
@@ -118,14 +118,16 @@
  *
  * Thread A has grabbed a lock before calling dmu_tx_assign().
  * Thread B is in an already-assigned tx, and blocks for this lock.
- * Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
- * forever, because the previous txg can't quiesce until B's tx commits.
+ * Thread A calls dmu_tx_assign(DMU_TX_ASSIGN_WAIT) and blocks in
+ * txg_wait_open() forever, because the previous txg can't quiesce until
+ * B's tx commits.
  *
- * If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
- * then drop all locks, call dmu_tx_wait(), and try again. On subsequent
- * calls to dmu_tx_assign(), pass TXG_NOTHROTTLE in addition to TXG_NOWAIT,
- * to indicate that this operation has already called dmu_tx_wait().
- * This will ensure that we don't retry forever, waiting a short bit
+ * If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is
+ * DMU_TX_ASSIGN_NOWAIT, then drop all locks, call dmu_tx_wait(), and try
+ * again. On subsequent calls to dmu_tx_assign(), pass
+ * DMU_TX_ASSIGN_NOTHROTTLE in addition to DMU_TX_ASSIGN_NOWAIT, to
+ * indicate that this operation has already called dmu_tx_wait(). This
+ * will ensure that we don't retry forever, waiting a short bit
  * each time.
  *
  * (5) If the operation succeeded, generate the intent log entry for it
@@ -148,7 +150,8 @@
  *	rw_enter(...);			// grab any other locks you need
  *	tx = dmu_tx_create(...);	// get DMU tx
  *	dmu_tx_hold_*();		// hold each object you might modify
- *	error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
+ *	error = dmu_tx_assign(tx,
+ *	    (waited ? DMU_TX_ASSIGN_NOTHROTTLE : 0) | DMU_TX_ASSIGN_NOWAIT);
  *	if (error) {
  *		rw_exit(...);		// drop locks
  *		zfs_dirent_unlock(dl);	// unlock directory entry
@ -699,8 +702,8 @@ top:
0, acl_ids.z_aclp->z_acl_bytes); 0, acl_ids.z_aclp->z_acl_bytes);
} }
error = dmu_tx_assign(tx, error = dmu_tx_assign(tx, (waited ?
(waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT); DMU_TX_ASSIGN_NOTHROTTLE : 0) | DMU_TX_ASSIGN_NOWAIT);
if (error) { if (error) {
zfs_dirent_unlock(dl); zfs_dirent_unlock(dl);
if (error == ERESTART) { if (error == ERESTART) {
@ -890,7 +893,8 @@ top:
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, acl_ids.z_aclp->z_acl_bytes); 0, acl_ids.z_aclp->z_acl_bytes);
} }
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT); error = dmu_tx_assign(tx,
(waited ? DMU_TX_ASSIGN_NOTHROTTLE : 0) | DMU_TX_ASSIGN_NOWAIT);
if (error) { if (error) {
if (error == ERESTART) { if (error == ERESTART) {
waited = B_TRUE; waited = B_TRUE;
@ -1055,7 +1059,8 @@ top:
*/ */
dmu_tx_mark_netfree(tx); dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT); error = dmu_tx_assign(tx,
(waited ? DMU_TX_ASSIGN_NOTHROTTLE : 0) | DMU_TX_ASSIGN_NOWAIT);
if (error) { if (error) {
zfs_dirent_unlock(dl); zfs_dirent_unlock(dl);
if (error == ERESTART) { if (error == ERESTART) {
@ -1298,7 +1303,8 @@ top:
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes + dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE); ZFS_SA_BASE_ATTR_SIZE);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT); error = dmu_tx_assign(tx,
(waited ? DMU_TX_ASSIGN_NOTHROTTLE : 0) | DMU_TX_ASSIGN_NOWAIT);
if (error) { if (error) {
zfs_dirent_unlock(dl); zfs_dirent_unlock(dl);
if (error == ERESTART) { if (error == ERESTART) {
@ -1443,7 +1449,8 @@ top:
zfs_sa_upgrade_txholds(tx, zp); zfs_sa_upgrade_txholds(tx, zp);
zfs_sa_upgrade_txholds(tx, dzp); zfs_sa_upgrade_txholds(tx, dzp);
dmu_tx_mark_netfree(tx); dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT); error = dmu_tx_assign(tx,
(waited ? DMU_TX_ASSIGN_NOTHROTTLE : 0) | DMU_TX_ASSIGN_NOWAIT);
if (error) { if (error) {
rw_exit(&zp->z_parent_lock); rw_exit(&zp->z_parent_lock);
rw_exit(&zp->z_name_lock); rw_exit(&zp->z_name_lock);
@ -1768,7 +1775,7 @@ zfs_setattr_dir(znode_t *dzp)
else else
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
err = dmu_tx_assign(tx, TXG_WAIT); err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (err) if (err)
break; break;
@ -2320,7 +2327,7 @@ top:
zfs_sa_upgrade_txholds(tx, zp); zfs_sa_upgrade_txholds(tx, zp);
err = dmu_tx_assign(tx, TXG_WAIT); err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (err) if (err)
goto out; goto out;
@ -2929,7 +2936,8 @@ top:
zfs_sa_upgrade_txholds(tx, szp); zfs_sa_upgrade_txholds(tx, szp);
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL); dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT); error = dmu_tx_assign(tx,
(waited ? DMU_TX_ASSIGN_NOTHROTTLE : 0) | DMU_TX_ASSIGN_NOWAIT);
if (error) { if (error) {
if (zl != NULL) if (zl != NULL)
zfs_rename_unlock(&zl); zfs_rename_unlock(&zl);
@ -3131,7 +3139,8 @@ top:
} }
if (fuid_dirtied) if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx); zfs_fuid_txhold(zfsvfs, tx);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT); error = dmu_tx_assign(tx,
(waited ? DMU_TX_ASSIGN_NOTHROTTLE : 0) | DMU_TX_ASSIGN_NOWAIT);
if (error) { if (error) {
zfs_dirent_unlock(dl); zfs_dirent_unlock(dl);
if (error == ERESTART) { if (error == ERESTART) {
@ -3378,7 +3387,8 @@ top:
zfs_sa_upgrade_txholds(tx, szp); zfs_sa_upgrade_txholds(tx, szp);
zfs_sa_upgrade_txholds(tx, tdzp); zfs_sa_upgrade_txholds(tx, tdzp);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT); error = dmu_tx_assign(tx,
(waited ? DMU_TX_ASSIGN_NOTHROTTLE : 0) | DMU_TX_ASSIGN_NOWAIT);
if (error) { if (error) {
zfs_dirent_unlock(dl); zfs_dirent_unlock(dl);
if (error == ERESTART) { if (error == ERESTART) {
@ -3590,7 +3600,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp); zfs_sa_upgrade_txholds(tx, zp);
err = dmu_tx_assign(tx, TXG_NOWAIT); err = dmu_tx_assign(tx, DMU_TX_ASSIGN_NOWAIT);
if (err != 0) { if (err != 0) {
if (err == ERESTART) if (err == ERESTART)
dmu_tx_wait(tx); dmu_tx_wait(tx);
@ -3687,7 +3697,7 @@ zfs_dirty_inode(struct inode *ip, int flags)
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp); zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
goto out; goto out;
@ -3744,7 +3754,7 @@ zfs_inactive(struct inode *ip)
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp); zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
} else { } else {

View File

@ -1528,7 +1528,7 @@ zfs_extend(znode_t *zp, uint64_t end)
newblksz = 0; newblksz = 0;
} }
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
zfs_rangelock_exit(lr); zfs_rangelock_exit(lr);
@ -1714,7 +1714,7 @@ zfs_trunc(znode_t *zp, uint64_t end)
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp); zfs_sa_upgrade_txholds(tx, zp);
dmu_tx_mark_netfree(tx); dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
zfs_rangelock_exit(lr); zfs_rangelock_exit(lr);
@ -1785,7 +1785,7 @@ log:
tx = dmu_tx_create(zfsvfs->z_os); tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp); zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
goto out; goto out;

View File

@ -152,7 +152,7 @@ zvol_write(zv_request_t *zvr)
dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, bytes); dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, bytes);
/* This will only fail for ENOSPC */ /* This will only fail for ENOSPC */
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
break; break;
@ -242,7 +242,7 @@ zvol_discard(zv_request_t *zvr)
tx = dmu_tx_create(zv->zv_objset); tx = dmu_tx_create(zv->zv_objset);
dmu_tx_mark_netfree(tx); dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error != 0) { if (error != 0) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
} else { } else {

View File

@ -880,7 +880,7 @@ dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
* reduction in space used. * reduction in space used.
*/ */
dmu_tx_mark_netfree(tx); dmu_tx_mark_netfree(tx);
err = dmu_tx_assign(tx, TXG_WAIT); err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (err) { if (err) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
return (err); return (err);
@ -971,7 +971,7 @@ dmu_free_long_object(objset_t *os, uint64_t object)
dmu_tx_hold_bonus(tx, object); dmu_tx_hold_bonus(tx, object);
dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END); dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
dmu_tx_mark_netfree(tx); dmu_tx_mark_netfree(tx);
err = dmu_tx_assign(tx, TXG_WAIT); err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (err == 0) { if (err == 0) {
if (err == 0) if (err == 0)
err = dmu_object_free(os, object, tx); err = dmu_object_free(os, object, tx);
@ -1648,7 +1648,7 @@ dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
tx = dmu_tx_create(os); tx = dmu_tx_create(os);
dmu_tx_hold_space(tx, zgd->zgd_db->db_size); dmu_tx_hold_space(tx, zgd->zgd_db->db_size);
if (dmu_tx_assign(tx, TXG_WAIT) != 0) { if (dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT) != 0) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
/* Make zl_get_data do txg_waited_synced() */ /* Make zl_get_data do txg_waited_synced() */
return (SET_ERROR(EIO)); return (SET_ERROR(EIO));

View File

@ -2336,7 +2336,7 @@ dmu_objset_space_upgrade(objset_t *os)
continue; continue;
tx = dmu_tx_create(os); tx = dmu_tx_create(os);
dmu_tx_hold_bonus(tx, obj); dmu_tx_hold_bonus(tx, obj);
objerr = dmu_tx_assign(tx, TXG_WAIT); objerr = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (objerr != 0) { if (objerr != 0) {
dmu_buf_rele(db, FTAG); dmu_buf_rele(db, FTAG);
dmu_tx_abort(tx); dmu_tx_abort(tx);

View File

@ -1733,7 +1733,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
tx = dmu_tx_create(rwa->os); tx = dmu_tx_create(rwa->os);
dmu_tx_hold_bonus(tx, object_to_hold); dmu_tx_hold_bonus(tx, object_to_hold);
dmu_tx_hold_write(tx, object_to_hold, 0, 0); dmu_tx_hold_write(tx, object_to_hold, 0, 0);
err = dmu_tx_assign(tx, TXG_WAIT); err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (err != 0) { if (err != 0) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
return (err); return (err);
@ -1925,7 +1925,7 @@ flush_write_batch_impl(struct receive_writer_arg *rwa)
dmu_tx_hold_write_by_dnode(tx, dn, first_drrw->drr_offset, dmu_tx_hold_write_by_dnode(tx, dn, first_drrw->drr_offset,
last_drrw->drr_offset - first_drrw->drr_offset + last_drrw->drr_offset - first_drrw->drr_offset +
last_drrw->drr_logical_size); last_drrw->drr_logical_size);
err = dmu_tx_assign(tx, TXG_WAIT); err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (err != 0) { if (err != 0) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
dnode_rele(dn, FTAG); dnode_rele(dn, FTAG);
@ -2151,7 +2151,7 @@ receive_write_embedded(struct receive_writer_arg *rwa,
dmu_tx_hold_write(tx, drrwe->drr_object, dmu_tx_hold_write(tx, drrwe->drr_object,
drrwe->drr_offset, drrwe->drr_length); drrwe->drr_offset, drrwe->drr_length);
err = dmu_tx_assign(tx, TXG_WAIT); err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (err != 0) { if (err != 0) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
return (err); return (err);
@ -2214,7 +2214,7 @@ receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
dmu_tx_hold_spill(tx, db->db_object); dmu_tx_hold_spill(tx, db->db_object);
err = dmu_tx_assign(tx, TXG_WAIT); err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (err != 0) { if (err != 0) {
dmu_buf_rele(db, FTAG); dmu_buf_rele(db, FTAG);
dmu_buf_rele(db_spill, FTAG); dmu_buf_rele(db_spill, FTAG);

View File

@ -565,7 +565,7 @@ commit_rl_updates(objset_t *os, struct merge_data *md, uint64_t object,
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(os->os_spa)->dp_mos_dir); dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(os->os_spa)->dp_mos_dir);
dmu_tx_hold_space(tx, sizeof (struct redact_block_list_node)); dmu_tx_hold_space(tx, sizeof (struct redact_block_list_node));
int err = dmu_tx_assign(tx, TXG_WAIT); int err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (err != 0) { if (err != 0) {
ASSERT(spa_exiting_any(os->os_spa)); ASSERT(spa_exiting_any(os->os_spa));
dmu_tx_abort(tx); dmu_tx_abort(tx);

View File

@@ -855,7 +855,7 @@ dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
  * decreasing performance.
  */
 static int
-dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
+dmu_tx_try_assign(dmu_tx_t *tx, dmu_tx_assign_flag_t flags)
 {
 	spa_t *spa = tx->tx_pool->dp_spa;
 
@@ -869,7 +869,7 @@ dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
 	if (spa_suspended(spa)) {
 		DMU_TX_STAT_BUMP(dmu_tx_suspended);
 
-		if (txg_how & TXG_NOSUSPEND)
+		if (flags & DMU_TX_ASSIGN_NOSUSPEND)
 			return (SET_ERROR(EAGAIN));
 
 		/*
@@ -888,11 +888,11 @@ dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
 		 * Otherwise, return EIO so that an error can get
 		 * propagated back to the VOP calls.
 		 *
-		 * Note that we always honor the txg_how flag regardless
-		 * of the failuremode setting.
+		 * Note that we always honor the DMU_TX_ASSIGN_WAIT flag
+		 * regardless of the failuremode setting.
 		 */
 		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
-		    !(txg_how & TXG_WAIT))
+		    !(flags & DMU_TX_ASSIGN_WAIT))
 			return (SET_ERROR(EIO));
 
 		return (SET_ERROR(ERESTART));
@@ -1011,29 +1011,28 @@ dmu_tx_unassign(dmu_tx_t *tx)
 static void dmu_tx_wait_flags(dmu_tx_t *, txg_wait_flag_t);
 
 /*
- * Assign tx to a transaction group; txg_how is a bitmask:
+ * Assign tx to a transaction group; "flags" is a bitmask:
  *
- * If TXG_WAIT is set and the currently open txg is full, this function
- * will wait until there's a new txg. This should be used when no locks
- * are being held. With this bit set, this function will only fail if
+ * If DMU_TX_ASSIGN_WAIT is set and the currently open txg is full, this
+ * function will wait until there's a new txg. This should be used when no
+ * locks are being held. With this bit set, this function will only fail if
  * we're truly out of space (or over quota).
 *
- * If TXG_WAIT is *not* set and we can't assign into the currently open
- * txg without blocking, this function will return immediately with
- * ERESTART. This should be used whenever locks are being held. On an
- * ERESTART error, the caller should drop all locks, call dmu_tx_wait(),
- * and try again.
+ * If DMU_TX_ASSIGN_WAIT is *not* set and we can't assign into the currently
+ * open txg without blocking, this function will return immediately with
+ * ERESTART. This should be used whenever locks are being held. On an ERESTART
+ * error, the caller should drop all locks, call dmu_tx_wait(), and try again.
 *
- * If TXG_NOTHROTTLE is set, this indicates that this tx should not be
+ * If DMU_TX_ASSIGN_NOTHROTTLE is set, this indicates that this tx should not be
 * delayed due on the ZFS Write Throttle (see comments in dsl_pool.c for
 * details on the throttle). This is used by the VFS operations, after
 * they have already called dmu_tx_wait() (though most likely on a
 * different tx).
 *
- * If TXG_NOSUSPEND is set, this indicates that this request must return
- * EAGAIN if the pool becomes suspended while it is in progress. This
- * ensures that the request does not inadvertently cause conditions that
- * cannot be unwound.
+ * If DMU_TX_ASSIGN_NOSUSPEND is set, this indicates that this request must
+ * return EAGAIN if the pool becomes suspended while it is in progress. This
+ * ensures that the request does not inadvertently cause conditions that cannot
+ * be unwound.
 *
 * It is guaranteed that subsequent successful calls to dmu_tx_assign()
 * will assign the tx to monotonically increasing txgs. Of course this is
@@ -1052,28 +1051,37 @@ static void dmu_tx_wait_flags(dmu_tx_t *, txg_wait_flag_t);
 * 1 <- dmu_tx_get_txg(T3)
 */
 int
-dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
+dmu_tx_assign(dmu_tx_t *tx, dmu_tx_assign_flag_t flags)
 {
 	int err;
 
 	ASSERT(tx->tx_txg == 0);
-	ASSERT0(txg_how & ~(TXG_NOSUSPEND | TXG_WAIT | TXG_NOTHROTTLE));
+	ASSERT0(flags & ~(DMU_TX_ASSIGN_NOSUSPEND | DMU_TX_ASSIGN_WAIT |
+	    DMU_TX_ASSIGN_NOTHROTTLE));
 	ASSERT(!dsl_pool_sync_context(tx->tx_pool));
 
 	/* If we might wait, we must not hold the config lock. */
-	IMPLY((txg_how & TXG_WAIT), !dsl_pool_config_held(tx->tx_pool));
+	IMPLY((flags & DMU_TX_ASSIGN_WAIT), !dsl_pool_config_held(tx->tx_pool));
 
-	if ((txg_how & TXG_NOTHROTTLE))
+	if ((flags & DMU_TX_ASSIGN_NOTHROTTLE))
 		tx->tx_dirty_delayed = B_TRUE;
 
-	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
+	while ((err = dmu_tx_try_assign(tx, flags)) != 0) {
 		dmu_tx_unassign(tx);
 
-		if (err != ERESTART || !(txg_how & TXG_WAIT))
+		if (err != ERESTART || !(flags & DMU_TX_ASSIGN_WAIT))
 			return (err);
 
-		dmu_tx_wait_flags(tx,
-		    (txg_how & TXG_NOSUSPEND) ? TXG_WAIT_F_NOSUSPEND : 0);
+		/*
+		 * Wait until there's room in this txg, or until its been
+		 * synced out and a new one is available. We pass the NOSUSPEND
+		 * flag down if its set; if the pool suspends while we're
+		 * waiting for the txg, this will return and we'll loop and end
+		 * up back in dmu_tx_try_assign, which will deal with the
+		 * suspension appropriately.
+		 */
+		dmu_tx_wait_flags(tx, (flags & DMU_TX_ASSIGN_NOSUSPEND)
+		    ? TXG_WAIT_F_NOSUSPEND : 0);
 	}
 
 	txg_rele_to_quiesce(&tx->tx_txgh);
@@ -1082,7 +1090,7 @@ dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
 }
 
 static void
-dmu_tx_wait_flags(dmu_tx_t *tx, txg_wait_flag_t how)
+dmu_tx_wait_flags(dmu_tx_t *tx, txg_wait_flag_t flags)
 {
 	spa_t *spa = tx->tx_pool->dp_spa;
 	dsl_pool_t *dp = tx->tx_pool;
@@ -1131,7 +1139,7 @@ dmu_tx_wait_flags(dmu_tx_t *tx, txg_wait_flag_t how)
 		 * It's also possible the pool will be force exported, in
 		 * which case we'll try again and notice this fact, and exit.
 		 */
-		txg_wait_synced_tx(dp, spa_last_synced_txg(spa) + 1, tx, how);
+		txg_wait_synced_tx(dp, spa_last_synced_txg(spa) + 1, tx, flags);
 	} else if (tx->tx_needassign_txh) {
 		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;
 
@@ -1149,7 +1157,7 @@ dmu_tx_wait_flags(dmu_tx_t *tx, txg_wait_flag_t how)
 		 * It's also possible the pool will be force exported, in
 		 * which case we'll try again and notice this fact, and exit.
 		 */
-		txg_wait_synced_tx(dp, spa_last_synced_txg(spa) + 1, tx, how);
+		txg_wait_synced_tx(dp, spa_last_synced_txg(spa) + 1, tx, flags);
 	}
 
 	spa_tx_assign_add_nsecs(spa, gethrtime() - before);
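
Most call sites in the remainder of this diff follow the simple blocking
pattern that the comment above describes for DMU_TX_ASSIGN_WAIT. A generic
sketch rather than any specific call site; os, object, and the bonus hold are
placeholders:

	dmu_tx_t *tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, object);		/* declare what will be dirtied */
	error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
	if (error != 0) {
		dmu_tx_abort(tx);	/* tx was never assigned; abort, don't commit */
		return (error);
	}
	/* ... modify the held object ... */
	dmu_tx_commit(tx);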

View File

@ -1106,7 +1106,7 @@ dsl_scan_restart_resilver(dsl_pool_t *dp, uint64_t txg)
if (txg == 0) { if (txg == 0) {
dmu_tx_t *tx; dmu_tx_t *tx;
tx = dmu_tx_create_dd(dp->dp_mos_dir); tx = dmu_tx_create_dd(dp->dp_mos_dir);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error != 0) { if (error != 0) {
ASSERT(spa_exiting_any(dp->dp_spa)); ASSERT(spa_exiting_any(dp->dp_spa));
dmu_tx_abort(tx); dmu_tx_abort(tx);

View File

@ -57,7 +57,7 @@ dsl_sync_task_common(const char *pool, dsl_checkfunc_t *checkfunc,
top: top:
tx = dmu_tx_create_dd(dp->dp_mos_dir); tx = dmu_tx_create_dd(dp->dp_mos_dir);
err = dmu_tx_assign(tx, TXG_WAIT); err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (err != 0) { if (err != 0) {
ASSERT(spa_exiting_any(spa)); ASSERT(spa_exiting_any(spa));
dmu_tx_abort(tx); dmu_tx_abort(tx);

View File

@ -1538,7 +1538,7 @@ static void
spa_unload_log_sm_flush_all(spa_t *spa) spa_unload_log_sm_flush_all(spa_t *spa)
{ {
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
int txerr = dmu_tx_assign(tx, TXG_WAIT); int txerr = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (txerr != 0) { if (txerr != 0) {
ASSERT(spa_exiting_any(spa)); ASSERT(spa_exiting_any(spa));
dmu_tx_abort(tx); dmu_tx_abort(tx);
@ -2833,7 +2833,8 @@ spa_livelist_condense_cb(void *arg, zthr_t *t)
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
dmu_tx_mark_netfree(tx); dmu_tx_mark_netfree(tx);
dmu_tx_hold_space(tx, 1); dmu_tx_hold_space(tx, 1);
err = dmu_tx_assign(tx, TXG_NOWAIT | TXG_NOTHROTTLE); err = dmu_tx_assign(tx,
DMU_TX_ASSIGN_NOWAIT | DMU_TX_ASSIGN_NOTHROTTLE);
if (err == 0) { if (err == 0) {
/* /*
* Prevent the condense zthr restarting before * Prevent the condense zthr restarting before
@ -7942,7 +7943,7 @@ spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
/* finally, update the original pool's config */ /* finally, update the original pool's config */
txg = spa_vdev_config_enter(spa); txg = spa_vdev_config_enter(spa);
tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error != 0) if (error != 0)
dmu_tx_abort(tx); dmu_tx_abort(tx);
for (c = 0; c < children; c++) { for (c = 0; c < children; c++) {

View File

@ -385,7 +385,7 @@ spa_history_log_nvl(spa_t *spa, nvlist_t *nvl)
} }
tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
err = dmu_tx_assign(tx, TXG_WAIT | TXG_NOSUSPEND); err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT | DMU_TX_ASSIGN_NOSUSPEND);
if (err) { if (err) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
return (err); return (err);
@ -556,7 +556,7 @@ spa_history_log_internal(spa_t *spa, const char *operation,
/* create a tx if we didn't get one */ /* create a tx if we didn't get one */
if (tx == NULL) { if (tx == NULL) {
htx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); htx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
if (dmu_tx_assign(htx, TXG_WAIT) != 0) { if (dmu_tx_assign(htx, DMU_TX_ASSIGN_WAIT) != 0) {
dmu_tx_abort(htx); dmu_tx_abort(htx);
return; return;
} }

View File

@ -570,7 +570,7 @@ spa_condense_indirect_commit_entry(spa_t *spa,
tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
dmu_tx_hold_space(tx, sizeof (*vimep) + sizeof (count)); dmu_tx_hold_space(tx, sizeof (*vimep) + sizeof (count));
if (dmu_tx_assign(tx, TXG_WAIT) != 0) { if (dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT) != 0) {
ASSERT(spa_exiting_any(spa)); ASSERT(spa_exiting_any(spa));
dmu_tx_abort(tx); dmu_tx_abort(tx);
return; return;

View File

@ -134,7 +134,7 @@ vdev_initialize_change_state(vdev_t *vd, vdev_initializing_state_t new_state)
* export, which requires the namespace lock) to recover. * export, which requires the namespace lock) to recover.
*/ */
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT)); VERIFY0(dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT));
dsl_sync_task_nowait(spa_get_dsl(spa), vdev_initialize_zap_update_sync, dsl_sync_task_nowait(spa_get_dsl(spa), vdev_initialize_zap_update_sync,
guid, tx); guid, tx);
@ -216,7 +216,7 @@ vdev_initialize_write(vdev_t *vd, uint64_t start, uint64_t size, abd_t *data)
mutex_exit(&vd->vdev_initialize_io_lock); mutex_exit(&vd->vdev_initialize_io_lock);
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT)); VERIFY0(dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT));
uint64_t txg = dmu_tx_get_txg(tx); uint64_t txg = dmu_tx_get_txg(tx);
spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER); spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);

View File

@ -283,7 +283,7 @@ vdev_rebuild_initiate(vdev_t *vd)
ASSERT(!vd->vdev_rebuilding); ASSERT(!vd->vdev_rebuilding);
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
int err = dmu_tx_assign(tx, TXG_WAIT); int err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (err != 0) { if (err != 0) {
ASSERT(spa_exiting_any(vd->vdev_spa)); ASSERT(spa_exiting_any(vd->vdev_spa));
dmu_tx_abort(tx); dmu_tx_abort(tx);
@ -576,7 +576,7 @@ vdev_rebuild_range(vdev_rebuild_t *vr, uint64_t start, uint64_t size)
mutex_exit(&vr->vr_io_lock); mutex_exit(&vr->vr_io_lock);
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
int err = dmu_tx_assign(tx, TXG_WAIT); int err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (err != 0) { if (err != 0) {
ASSERT(spa_exiting_any(spa)); ASSERT(spa_exiting_any(spa));
dmu_tx_abort(tx); dmu_tx_abort(tx);
@ -914,7 +914,7 @@ vdev_rebuild_thread(void *arg)
dsl_pool_t *dp = spa_get_dsl(spa); dsl_pool_t *dp = spa_get_dsl(spa);
dmu_tx_t *tx = dmu_tx_create_dd(dp->dp_mos_dir); dmu_tx_t *tx = dmu_tx_create_dd(dp->dp_mos_dir);
int txerr = dmu_tx_assign(tx, TXG_WAIT); int txerr = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
mutex_enter(&vd->vdev_rebuild_lock); mutex_enter(&vd->vdev_rebuild_lock);
if (txerr != 0) { if (txerr != 0) {

View File

@ -1535,7 +1535,7 @@ spa_vdev_remove_thread(void *arg)
* If a tx can't be assigned, just punt and wait for * If a tx can't be assigned, just punt and wait for
* the next round. This must be an exiting spa. * the next round. This must be an exiting spa.
*/ */
if (dmu_tx_assign(tx, TXG_WAIT) != 0) { if (dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT) != 0) {
ASSERT(spa_exiting_any(spa)); ASSERT(spa_exiting_any(spa));
dmu_tx_abort(tx); dmu_tx_abort(tx);
goto done; goto done;

View File

@ -317,7 +317,7 @@ vdev_trim_change_state(vdev_t *vd, vdev_trim_state_t new_state,
vd->vdev_trim_state = new_state; vd->vdev_trim_state = new_state;
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
int txerr = dmu_tx_assign(tx, TXG_WAIT); int txerr = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (txerr != 0) { if (txerr != 0) {
ASSERT(spa_exiting_any(spa)); ASSERT(spa_exiting_any(spa));
dmu_tx_abort(tx); dmu_tx_abort(tx);
@ -509,7 +509,7 @@ vdev_trim_range(trim_args_t *ta, uint64_t start, uint64_t size)
mutex_exit(&vd->vdev_trim_io_lock); mutex_exit(&vd->vdev_trim_io_lock);
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir); dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
int err = dmu_tx_assign(tx, TXG_WAIT); int err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (err != 0) { if (err != 0) {
ASSERT(spa_exiting_any(spa)); ASSERT(spa_exiting_any(spa));
mutex_enter(&vd->vdev_trim_io_lock); mutex_enter(&vd->vdev_trim_io_lock);

View File

@ -338,7 +338,7 @@ zfs_set_userquota(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
} }
if (fuid_dirtied) if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx); zfs_fuid_txhold(zfsvfs, tx);
err = dmu_tx_assign(tx, TXG_WAIT); err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (err) { if (err) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
return (err); return (err);

View File

@ -756,7 +756,7 @@ top:
zp->z_size = end; zp->z_size = end;
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
zrele(zp); zrele(zp);
if (error == ERESTART) { if (error == ERESTART) {

View File

@ -248,7 +248,7 @@ zfs_sa_set_xattr(znode_t *zp)
dmu_tx_hold_sa_create(tx, size); dmu_tx_hold_sa_create(tx, size);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE); dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
} else { } else {

View File

@ -556,7 +556,7 @@ zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
MIN(n, max_blksz)); MIN(n, max_blksz));
DB_DNODE_EXIT(db); DB_DNODE_EXIT(db);
zfs_sa_upgrade_txholds(tx, zp); zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
if (abuf != NULL) if (abuf != NULL)

View File

@ -695,7 +695,7 @@ zil_create(zilog_t *zilog)
*/ */
if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) { if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
tx = dmu_tx_create(zilog->zl_os); tx = dmu_tx_create(zilog->zl_os);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error != 0) { if (error != 0) {
ASSERT(dmu_objset_exiting(zilog->zl_os)); ASSERT(dmu_objset_exiting(zilog->zl_os));
dmu_tx_abort(tx); dmu_tx_abort(tx);
@ -768,7 +768,7 @@ zil_destroy(zilog_t *zilog, boolean_t keep_first)
return; return;
tx = dmu_tx_create(zilog->zl_os); tx = dmu_tx_create(zilog->zl_os);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error != 0) { if (error != 0) {
ASSERT(dmu_objset_exiting(zilog->zl_os)); ASSERT(dmu_objset_exiting(zilog->zl_os));
dmu_tx_abort(tx); dmu_tx_abort(tx);
@@ -1530,9 +1530,10 @@ zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
	 * Since we are not going to create any new dirty data, and we
	 * can even help with clearing the existing dirty data, we
	 * should not be subject to the dirty data based delays. We
-	 * use TXG_NOTHROTTLE to bypass the delay mechanism.
+	 * use DMU_TX_ASSIGN_NOTHROTTLE to bypass the delay mechanism.
	 */
-	if (dmu_tx_assign(tx, TXG_WAIT | TXG_NOTHROTTLE) != 0) {
+	if (dmu_tx_assign(tx,
+	    DMU_TX_ASSIGN_WAIT | DMU_TX_ASSIGN_NOTHROTTLE) != 0) {
		ASSERT(dmu_objset_exiting(zilog->zl_os));
		dmu_tx_abort(tx);
		return (NULL);
@ -2829,7 +2830,7 @@ zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw)
{ {
dmu_tx_t *tx = dmu_tx_create(zilog->zl_os); dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
if (dmu_tx_assign(tx, TXG_WAIT) != 0) { if (dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT) != 0) {
ASSERT(dmu_objset_exiting(zilog->zl_os)); ASSERT(dmu_objset_exiting(zilog->zl_os));
dmu_tx_abort(tx); dmu_tx_abort(tx);
return; return;

View File

@ -281,7 +281,7 @@ zvol_update_volsize(uint64_t volsize, objset_t *os)
tx = dmu_tx_create(os); tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL); dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
dmu_tx_mark_netfree(tx); dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
return (SET_ERROR(error)); return (SET_ERROR(error));
@ -435,7 +435,7 @@ zvol_set_volblocksize(const char *name, uint64_t volblocksize)
tx = dmu_tx_create(zv->zv_objset); tx = dmu_tx_create(zv->zv_objset);
dmu_tx_hold_bonus(tx, ZVOL_OBJ); dmu_tx_hold_bonus(tx, ZVOL_OBJ);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
} else { } else {
@ -473,7 +473,7 @@ zvol_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
dmu_tx_t *tx = dmu_tx_create(zv->zv_objset); dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
dmu_tx_mark_netfree(tx); dmu_tx_mark_netfree(tx);
int error = dmu_tx_assign(tx, TXG_WAIT); int error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error != 0) { if (error != 0) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
} else { } else {
@ -518,7 +518,7 @@ zvol_replay_write(void *arg1, void *arg2, boolean_t byteswap)
tx = dmu_tx_create(os); tx = dmu_tx_create(os);
dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length); dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
error = dmu_tx_assign(tx, TXG_WAIT); error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
if (error) { if (error) {
dmu_tx_abort(tx); dmu_tx_abort(tx);
} else { } else {