dmu: rename dmu_tx_assign flags
Their names clash with those used by txg_wait_synced_tx() (the txg_wait_flag_t TXG_WAIT_F_* values), and the two sets aren't directly compatible, which leads to confusion.

Signed-off-by: Rob Norris <rob.norris@klarasystems.com>
(cherry picked from commit 1f0fb1dae7c1e84de3b39e669e09b8b3d5b80b87)
commit 48a48059c7 (parent b0d75996ba)
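A note on the clash the message refers to, visible in the dmu_tx.c hunks below: dmu_tx_assign() took TXG_* bitmask values (TXG_WAIT, TXG_NOSUSPEND, ...), while txg_wait_synced_tx() takes txg_wait_flag_t values spelled TXG_WAIT_F_*. The names look related but the values mean different things. A contrived pre-rename sketch of the hazard; the surrounding variables are illustrative, not from the patch:

    /*
     * Two unrelated flag namespaces, both spelled "TXG_", both passed as
     * plain integers: mixing them up compiled cleanly but meant the
     * wrong thing.
     */
    error = dmu_tx_assign(tx, TXG_WAIT);                   /* dmu_tx flag */
    txg_wait_synced_tx(dp, txg, tx, TXG_WAIT_F_NOSUSPEND); /* txg_wait flag */

After the rename, a DMU_TX_ASSIGN_* value passed to the wrong function stands out on sight.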
@@ -1745,10 +1745,11 @@ ztest_zd_fini(ztest_ds_t *zd)
 ztest_rll_destroy(&zd->zd_range_lock[l]);
 }
 
-#define TXG_MIGHTWAIT (ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)
+#define DMU_TX_ASSIGN_MIGHTWAIT \
+    (ztest_random(10) == 0 ? DMU_TX_ASSIGN_NOWAIT : DMU_TX_ASSIGN_WAIT)
 
 static uint64_t
-ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag)
+ztest_tx_assign(dmu_tx_t *tx, dmu_tx_assign_flag_t flags, const char *tag)
 {
 uint64_t txg;
 int error;
@@ -1756,10 +1757,10 @@ ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag)
 /*
  * Attempt to assign tx to some transaction group.
  */
-error = dmu_tx_assign(tx, txg_how);
+error = dmu_tx_assign(tx, flags);
 if (error) {
 if (error == ERESTART) {
-ASSERT3U(txg_how, ==, TXG_NOWAIT);
+ASSERT3U(flags, ==, DMU_TX_ASSIGN_NOWAIT);
 dmu_tx_wait(tx);
 } else {
 ASSERT3U(error, ==, ENOSPC);
@@ -2005,7 +2006,7 @@ ztest_replay_create(void *arg1, void *arg2, boolean_t byteswap)
 dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
 }
 
-txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
+txg = ztest_tx_assign(tx, DMU_TX_ASSIGN_WAIT, FTAG);
 if (txg == 0)
 return (ENOSPC);
 
@@ -2095,7 +2096,7 @@ ztest_replay_remove(void *arg1, void *arg2, boolean_t byteswap)
 dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name);
 dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 
-txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
+txg = ztest_tx_assign(tx, DMU_TX_ASSIGN_WAIT, FTAG);
 if (txg == 0) {
 ztest_object_unlock(zd, object);
 return (ENOSPC);
@@ -2177,7 +2178,7 @@ ztest_replay_write(void *arg1, void *arg2, boolean_t byteswap)
 P2PHASE(offset, length) == 0)
 abuf = dmu_request_arcbuf(db, length);
 
-txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
+txg = ztest_tx_assign(tx, DMU_TX_ASSIGN_WAIT, FTAG);
 if (txg == 0) {
 if (abuf != NULL)
 dmu_return_arcbuf(abuf);
@@ -2267,7 +2268,7 @@ ztest_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
 
 dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length);
 
-txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
+txg = ztest_tx_assign(tx, DMU_TX_ASSIGN_WAIT, FTAG);
 if (txg == 0) {
 ztest_range_unlock(rl);
 ztest_object_unlock(zd, lr->lr_foid);
@@ -2308,7 +2309,7 @@ ztest_replay_setattr(void *arg1, void *arg2, boolean_t byteswap)
 tx = dmu_tx_create(os);
 dmu_tx_hold_bonus(tx, lr->lr_foid);
 
-txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
+txg = ztest_tx_assign(tx, DMU_TX_ASSIGN_WAIT, FTAG);
 if (txg == 0) {
 dmu_buf_rele(db, FTAG);
 ztest_object_unlock(zd, lr->lr_foid);
@@ -2722,7 +2723,7 @@ ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
 
 dmu_tx_hold_write(tx, object, offset, size);
 
-txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
+txg = ztest_tx_assign(tx, DMU_TX_ASSIGN_WAIT, FTAG);
 
 if (txg != 0) {
 dmu_prealloc(os, object, offset, size, tx);
@@ -4770,7 +4771,7 @@ ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
 /* This accounts for setting the checksum/compression. */
 dmu_tx_hold_bonus(tx, bigobj);
 
-txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
+txg = ztest_tx_assign(tx, DMU_TX_ASSIGN_MIGHTWAIT, FTAG);
 if (txg == 0) {
 umem_free(packbuf, packsize);
 umem_free(bigbuf, bigsize);
@@ -5060,7 +5061,7 @@ ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
 dmu_tx_hold_write(tx, packobj, packoff, packsize);
 dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
 
-txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
+txg = ztest_tx_assign(tx, DMU_TX_ASSIGN_MIGHTWAIT, FTAG);
 if (txg == 0) {
 umem_free(packbuf, packsize);
 umem_free(bigbuf, bigsize);
@@ -5281,7 +5282,7 @@ ztest_zap(ztest_ds_t *zd, uint64_t id)
 */
 tx = dmu_tx_create(os);
 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
-txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
+txg = ztest_tx_assign(tx, DMU_TX_ASSIGN_MIGHTWAIT, FTAG);
 if (txg == 0)
 goto out;
 for (i = 0; i < 2; i++) {
@@ -5349,7 +5350,7 @@ ztest_zap(ztest_ds_t *zd, uint64_t id)
 */
 tx = dmu_tx_create(os);
 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
-txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
+txg = ztest_tx_assign(tx, DMU_TX_ASSIGN_MIGHTWAIT, FTAG);
 if (txg == 0)
 goto out;
 
@@ -5382,7 +5383,7 @@ ztest_zap(ztest_ds_t *zd, uint64_t id)
 
 tx = dmu_tx_create(os);
 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
-txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
+txg = ztest_tx_assign(tx, DMU_TX_ASSIGN_MIGHTWAIT, FTAG);
 if (txg == 0)
 goto out;
 VERIFY0(zap_remove(os, object, txgname, tx));
@@ -5427,7 +5428,7 @@ ztest_fzap(ztest_ds_t *zd, uint64_t id)
 
 tx = dmu_tx_create(os);
 dmu_tx_hold_zap(tx, object, B_TRUE, name);
-txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
+txg = ztest_tx_assign(tx, DMU_TX_ASSIGN_MIGHTWAIT, FTAG);
 if (txg == 0)
 goto out;
 error = zap_add(os, object, name, sizeof (uint64_t), 1,
@@ -5498,7 +5499,7 @@ ztest_zap_parallel(ztest_ds_t *zd, uint64_t id)
 if (i >= 2) {
 tx = dmu_tx_create(os);
 dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
-txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
+txg = ztest_tx_assign(tx, DMU_TX_ASSIGN_MIGHTWAIT, FTAG);
 if (txg == 0) {
 umem_free(od, sizeof (ztest_od_t));
 return;
@@ -5663,7 +5664,7 @@ ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
 error = -1;
 
 if (!error)
-error = dmu_tx_assign(tx, TXG_NOWAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_NOWAIT);
 
 txg = error ? 0 : dmu_tx_get_txg(tx);
 
@@ -269,14 +269,16 @@ typedef enum dmu_object_type {
 } dmu_object_type_t;
 
 /*
- * These flags are intended to be used to specify the "txg_how"
+ * These flags are intended to be used to specify the "flags"
  * parameter when calling the dmu_tx_assign() function. See the comment
  * above dmu_tx_assign() for more details on the meaning of these flags.
  */
-#define TXG_NOWAIT (0ULL)
-#define TXG_WAIT (1ULL<<0)
-#define TXG_NOTHROTTLE (1ULL<<1)
-#define TXG_NOSUSPEND (1ULL<<2)
+typedef enum {
+    DMU_TX_ASSIGN_NOWAIT = 0,
+    DMU_TX_ASSIGN_WAIT = (1U << 0),
+    DMU_TX_ASSIGN_NOTHROTTLE = (1U << 1),
+    DMU_TX_ASSIGN_NOSUSPEND = (1U << 2),
+} dmu_tx_assign_flag_t;
 
 void byteswap_uint64_array(void *buf, size_t size);
 void byteswap_uint32_array(void *buf, size_t size);
@@ -785,7 +787,7 @@ void dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object);
 void dmu_tx_hold_sa(dmu_tx_t *tx, struct sa_handle *hdl, boolean_t may_grow);
 void dmu_tx_hold_sa_create(dmu_tx_t *tx, int total_size);
 void dmu_tx_abort(dmu_tx_t *tx);
-int dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how);
+int dmu_tx_assign(dmu_tx_t *tx, dmu_tx_assign_flag_t flags);
 void dmu_tx_wait(dmu_tx_t *tx);
 void dmu_tx_commit(dmu_tx_t *tx);
 void dmu_tx_mark_netfree(dmu_tx_t *tx);
 
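For callers the rename is mechanical: the enum values keep the bit positions of the old TXG_* macros, so flag combinations still OR together. A minimal sketch of the updated calling convention (the objset/object/offset variables here are illustrative, not from the patch):

    /*
     * Sketch only: create a tx, hold the range we intend to dirty, and
     * assign with DMU_TX_ASSIGN_WAIT, which blocks for a new txg when
     * the open one is full and so must only be used with no locks held.
     */
    dmu_tx_t *tx = dmu_tx_create(os);
    dmu_tx_hold_write(tx, object, offset, length);
    int err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
    if (err != 0) {
        dmu_tx_abort(tx);   /* truly out of space, or over quota */
        return (err);
    }
    /* ... dirty the held range ... */
    dmu_tx_commit(tx);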
@@ -139,7 +139,7 @@ extern dmu_tx_stats_t dmu_tx_stats;
 * These routines are defined in dmu.h, and are called by the user.
 */
 dmu_tx_t *dmu_tx_create(objset_t *dd);
-int dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how);
+int dmu_tx_assign(dmu_tx_t *tx, dmu_tx_assign_flag_t flags);
 void dmu_tx_commit(dmu_tx_t *tx);
 void dmu_tx_abort(dmu_tx_t *tx);
 uint64_t dmu_tx_get_txg(dmu_tx_t *tx);
 
@@ -1998,7 +1998,7 @@ top:
 }
 
 zfs_sa_upgrade_txholds(tx, zp);
-error = dmu_tx_assign(tx, TXG_NOWAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_NOWAIT);
 if (error) {
 mutex_exit(&zp->z_acl_lock);
 
@@ -336,7 +336,7 @@ zfs_unlinked_drain(zfsvfs_t *zfsvfs)
 if (zp->z_links != 0) {
 tx = dmu_tx_create(zfsvfs->z_os);
 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error != 0) {
 dmu_tx_abort(tx);
 vput(ZTOV(zp));
@@ -398,7 +398,7 @@ zfs_purgedir(znode_t *dzp)
 /* Is this really needed ? */
 zfs_sa_upgrade_txholds(tx, xzp);
 dmu_tx_mark_netfree(tx);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 dmu_tx_abort(tx);
 vput(ZTOV(xzp));
@@ -494,7 +494,7 @@ zfs_rmnode(znode_t *zp)
 dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
 
 zfs_sa_upgrade_txholds(tx, zp);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 /*
  * Not enough space to delete the file. Leave it in the
@@ -827,7 +827,7 @@ zfs_make_xattrdir(znode_t *zp, vattr_t *vap, znode_t **xvpp, cred_t *cr)
 fuid_dirtied = zfsvfs->z_fuid_dirty;
 if (fuid_dirtied)
 zfs_fuid_txhold(zfsvfs, tx);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 zfs_acl_ids_free(&acl_ids);
 dmu_tx_abort(tx);
 
@@ -2159,7 +2159,7 @@ zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
 ZFS_SA_ATTRS);
 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
 }
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 dmu_tx_abort(tx);
 return (error);
 
@@ -153,14 +153,14 @@ typedef ulong_t cookie_t;
 * (3) All range locks must be grabbed before calling dmu_tx_assign(),
 * as they can span dmu_tx_assign() calls.
 *
-* (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
-* dmu_tx_assign(). This is critical because we don't want to block
+* (4) If ZPL locks are held, pass DMU_TX_ASSIGN_NOWAIT as the second argument
+* to dmu_tx_assign(). This is critical because we don't want to block
 * while holding locks.
 *
-* If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT. This
-* reduces lock contention and CPU usage when we must wait (note that if
-* throughput is constrained by the storage, nearly every transaction
-* must wait).
+* If no ZPL locks are held (aside from ZFS_ENTER()), use
+* DMU_TX_ASSIGN_WAIT. This reduces lock contention and CPU usage when we
+* must wait (note that if throughput is constrained by the storage, nearly
+* every transaction must wait).
 *
 * Note, in particular, that if a lock is sometimes acquired before
 * the tx assigns, and sometimes after (e.g. z_lock), then failing
@@ -168,15 +168,16 @@ typedef ulong_t cookie_t;
 *
 * Thread A has grabbed a lock before calling dmu_tx_assign().
 * Thread B is in an already-assigned tx, and blocks for this lock.
-* Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
-* forever, because the previous txg can't quiesce until B's tx commits.
+* Thread A calls dmu_tx_assign(DMU_TX_ASSIGN_WAIT) and blocks in
+* txg_wait_open() forever, because the previous txg can't quiesce until
+* B's tx commits.
 *
-* If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
-* then drop all locks, call dmu_tx_wait(), and try again. On subsequent
-* calls to dmu_tx_assign(), pass TXG_NOTHROTTLE in addition to TXG_NOWAIT,
-* to indicate that this operation has already called dmu_tx_wait().
-* This will ensure that we don't retry forever, waiting a short bit
-* each time.
+* If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is
+* DMU_TX_ASSIGN_NOWAIT, then drop all locks, call dmu_tx_wait(), and try
+* again. On subsequent calls to dmu_tx_assign(), pass
+* DMU_TX_ASSIGN_NOTHROTTLE in addition to DMU_TX_ASSIGN_NOWAIT, to
+* indicate that this operation has already called dmu_tx_wait(). This
+* will ensure that we don't retry forever, waiting a short bit each time.
 *
 * (5) If the operation succeeded, generate the intent log entry for it
 * before dropping locks. This ensures that the ordering of events
@@ -198,7 +199,8 @@ typedef ulong_t cookie_t;
 * rw_enter(...); // grab any other locks you need
 * tx = dmu_tx_create(...); // get DMU tx
 * dmu_tx_hold_*(); // hold each object you might modify
-* error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
+* error = dmu_tx_assign(tx,
+* (waited ? DMU_TX_ASSIGN_NOTHROTTLE : 0) | DMU_TX_ASSIGN_NOWAIT);
 * if (error) {
 * rw_exit(...); // drop locks
 * zfs_dirent_unlock(dl); // unlock directory entry
 
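Spelled out with the new names, the retry protocol described in the comment above looks like the following sketch. The lock and unlock steps are placeholders for whatever ZPL locks the operation actually holds; the shape matches the zfs_create/zfs_remove hunks later in this diff:

    boolean_t waited = B_FALSE;
    top:
        /* acquire ZPL locks, create the tx, add holds ... */
        error = dmu_tx_assign(tx,
            (waited ? DMU_TX_ASSIGN_NOTHROTTLE : 0) | DMU_TX_ASSIGN_NOWAIT);
        if (error) {
            /* drop all ZPL locks before waiting */
            if (error == ERESTART) {
                waited = B_TRUE;    /* skip the write throttle on retry */
                dmu_tx_wait(tx);    /* wait with no locks held */
                dmu_tx_abort(tx);
                goto top;
            }
            dmu_tx_abort(tx);
            return (error);
        }
        /* ... do the work, log to the ZIL, dmu_tx_commit(tx), drop locks ... */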
@@ -1162,7 +1164,7 @@ zfs_create(znode_t *dzp, const char *name, vattr_t *vap, int excl, int mode,
 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
 0, acl_ids.z_aclp->z_acl_bytes);
 }
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 zfs_acl_ids_free(&acl_ids);
 dmu_tx_abort(tx);
@@ -1287,7 +1289,7 @@ zfs_remove_(vnode_t *dvp, vnode_t *vp, const char *name, cred_t *cr)
 */
 dmu_tx_mark_netfree(tx);
 
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 dmu_tx_abort(tx);
 ZFS_EXIT(zfsvfs);
@@ -1502,7 +1504,7 @@ zfs_mkdir(znode_t *dzp, const char *dirname, vattr_t *vap, znode_t **zpp,
 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
 ZFS_SA_BASE_ATTR_SIZE);
 
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 zfs_acl_ids_free(&acl_ids);
 dmu_tx_abort(tx);
@@ -1605,7 +1607,7 @@ zfs_rmdir_(vnode_t *dvp, vnode_t *vp, const char *name, cred_t *cr)
 zfs_sa_upgrade_txholds(tx, zp);
 zfs_sa_upgrade_txholds(tx, dzp);
 dmu_tx_mark_netfree(tx);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 dmu_tx_abort(tx);
 ZFS_EXIT(zfsvfs);
@@ -2694,7 +2696,7 @@ zfs_setattr(znode_t *zp, vattr_t *vap, int flags, cred_t *cr)
 
 zfs_sa_upgrade_txholds(tx, zp);
 
-err = dmu_tx_assign(tx, TXG_WAIT);
+err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (err)
 goto out;
 
@@ -3396,7 +3398,7 @@ zfs_do_rename_impl(vnode_t *sdvp, vnode_t **svpp, struct componentname *scnp,
 
 zfs_sa_upgrade_txholds(tx, szp);
 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 dmu_tx_abort(tx);
 goto out_seq;
@@ -3592,7 +3594,7 @@ zfs_symlink(znode_t *dzp, const char *name, vattr_t *vap,
 }
 if (fuid_dirtied)
 zfs_fuid_txhold(zfsvfs, tx);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 zfs_acl_ids_free(&acl_ids);
 dmu_tx_abort(tx);
@@ -3797,7 +3799,7 @@ zfs_link(znode_t *tdzp, znode_t *szp, const char *name, cred_t *cr,
 dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, name);
 zfs_sa_upgrade_txholds(tx, szp);
 zfs_sa_upgrade_txholds(tx, tdzp);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 dmu_tx_abort(tx);
 ZFS_EXIT(zfsvfs);
@@ -3926,7 +3928,7 @@ zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
 
 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
 zfs_sa_upgrade_txholds(tx, zp);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 dmu_tx_abort(tx);
 } else {
@@ -4255,7 +4257,7 @@ zfs_putpages(struct vnode *vp, vm_page_t *ma, size_t len, int flags,
 
 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
 zfs_sa_upgrade_txholds(tx, zp);
-err = dmu_tx_assign(tx, TXG_WAIT);
+err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (err != 0) {
 dmu_tx_abort(tx);
 goto out;
 
@@ -1428,7 +1428,7 @@ zfs_extend(znode_t *zp, uint64_t end)
 newblksz = 0;
 }
 
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 dmu_tx_abort(tx);
 zfs_rangelock_exit(lr);
@@ -1546,7 +1546,7 @@ zfs_trunc(znode_t *zp, uint64_t end)
 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
 zfs_sa_upgrade_txholds(tx, zp);
 dmu_tx_mark_netfree(tx);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 dmu_tx_abort(tx);
 zfs_rangelock_exit(lr);
@@ -1627,7 +1627,7 @@ log:
 tx = dmu_tx_create(zfsvfs->z_os);
 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
 zfs_sa_upgrade_txholds(tx, zp);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 dmu_tx_abort(tx);
 return (error);
 
@@ -677,7 +677,7 @@ zvol_geom_bio_strategy(struct bio *bp)
 
 if (bp->bio_cmd == BIO_DELETE) {
 dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error != 0) {
 dmu_tx_abort(tx);
 } else {
@@ -697,7 +697,7 @@ zvol_geom_bio_strategy(struct bio *bp)
 } else {
 dmu_tx_t *tx = dmu_tx_create(os);
 dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, size);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 dmu_tx_abort(tx);
 } else {
@@ -842,7 +842,7 @@ zvol_cdev_write(struct cdev *dev, struct uio *uio_s, int ioflag)
 bytes = volsize - off;
 
 dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, bytes);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 dmu_tx_abort(tx);
 break;
@@ -1104,7 +1104,7 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
 lr = zfs_rangelock_enter(&zv->zv_rangelock, offset, length,
 RL_WRITER);
 dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error != 0) {
 sync = FALSE;
 dmu_tx_abort(tx);
 
@@ -2164,7 +2164,7 @@ top:
 }
 
 zfs_sa_upgrade_txholds(tx, zp);
-error = dmu_tx_assign(tx, TXG_NOWAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_NOWAIT);
 if (error) {
 mutex_exit(&zp->z_acl_lock);
 mutex_exit(&zp->z_lock);
 
@@ -617,7 +617,7 @@ zfs_purgedir(znode_t *dzp)
 /* Is this really needed ? */
 zfs_sa_upgrade_txholds(tx, xzp);
 dmu_tx_mark_netfree(tx);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 dmu_tx_abort(tx);
 zfs_zrele_async(xzp);
@@ -717,7 +717,7 @@ zfs_rmnode(znode_t *zp)
 dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
 
 zfs_sa_upgrade_txholds(tx, zp);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 /*
  * Not enough space to delete the file. Leave it in the
@@ -1084,7 +1084,7 @@ zfs_make_xattrdir(znode_t *zp, vattr_t *vap, znode_t **xzpp, cred_t *cr)
 fuid_dirtied = zfsvfs->z_fuid_dirty;
 if (fuid_dirtied)
 zfs_fuid_txhold(zfsvfs, tx);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 zfs_acl_ids_free(&acl_ids);
 dmu_tx_abort(tx);
 
@@ -2006,7 +2006,7 @@ zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
 ZFS_SA_ATTRS);
 dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
 }
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 dmu_tx_abort(tx);
 return (error);
 
@@ -103,14 +103,14 @@
 * (3) All range locks must be grabbed before calling dmu_tx_assign(),
 * as they can span dmu_tx_assign() calls.
 *
-* (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
-* dmu_tx_assign(). This is critical because we don't want to block
+* (4) If ZPL locks are held, pass DMU_TX_ASSIGN_NOWAIT as the second argument
+* to dmu_tx_assign(). This is critical because we don't want to block
 * while holding locks.
 *
-* If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT. This
-* reduces lock contention and CPU usage when we must wait (note that if
-* throughput is constrained by the storage, nearly every transaction
-* must wait).
+* If no ZPL locks are held (aside from ZFS_ENTER()), use
+* DMU_TX_ASSIGN_WAIT. This reduces lock contention and CPU usage when we
+* must wait (note that if throughput is constrained by the storage, nearly
+* every transaction must wait).
 *
 * Note, in particular, that if a lock is sometimes acquired before
 * the tx assigns, and sometimes after (e.g. z_lock), then failing
@@ -118,14 +118,16 @@
 *
 * Thread A has grabbed a lock before calling dmu_tx_assign().
 * Thread B is in an already-assigned tx, and blocks for this lock.
-* Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
-* forever, because the previous txg can't quiesce until B's tx commits.
+* Thread A calls dmu_tx_assign(DMU_TX_ASSIGN_WAIT) and blocks in
+* txg_wait_open() forever, because the previous txg can't quiesce until
+* B's tx commits.
 *
-* If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
-* then drop all locks, call dmu_tx_wait(), and try again. On subsequent
-* calls to dmu_tx_assign(), pass TXG_NOTHROTTLE in addition to TXG_NOWAIT,
-* to indicate that this operation has already called dmu_tx_wait().
-* This will ensure that we don't retry forever, waiting a short bit
+* If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is
+* DMU_TX_ASSIGN_NOWAIT, then drop all locks, call dmu_tx_wait(), and try
+* again. On subsequent calls to dmu_tx_assign(), pass
+* DMU_TX_ASSIGN_NOTHROTTLE in addition to DMU_TX_ASSIGN_NOWAIT, to
+* indicate that this operation has already called dmu_tx_wait(). This
+* will ensure that we don't retry forever, waiting a short bit
 * each time.
 *
 * (5) If the operation succeeded, generate the intent log entry for it
@@ -148,7 +150,8 @@
 * rw_enter(...); // grab any other locks you need
 * tx = dmu_tx_create(...); // get DMU tx
 * dmu_tx_hold_*(); // hold each object you might modify
-* error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
+* error = dmu_tx_assign(tx,
+* (waited ? DMU_TX_ASSIGN_NOTHROTTLE : 0) | DMU_TX_ASSIGN_NOWAIT);
 * if (error) {
 * rw_exit(...); // drop locks
 * zfs_dirent_unlock(dl); // unlock directory entry
@@ -699,8 +702,8 @@ top:
 0, acl_ids.z_aclp->z_acl_bytes);
 }
 
-error = dmu_tx_assign(tx,
-(waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
+error = dmu_tx_assign(tx, (waited ?
+DMU_TX_ASSIGN_NOTHROTTLE : 0) | DMU_TX_ASSIGN_NOWAIT);
 if (error) {
 zfs_dirent_unlock(dl);
 if (error == ERESTART) {
@@ -890,7 +893,8 @@ top:
 dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
 0, acl_ids.z_aclp->z_acl_bytes);
 }
-error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
+error = dmu_tx_assign(tx,
+(waited ? DMU_TX_ASSIGN_NOTHROTTLE : 0) | DMU_TX_ASSIGN_NOWAIT);
 if (error) {
 if (error == ERESTART) {
 waited = B_TRUE;
@@ -1055,7 +1059,8 @@ top:
 */
 dmu_tx_mark_netfree(tx);
 
-error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
+error = dmu_tx_assign(tx,
+(waited ? DMU_TX_ASSIGN_NOTHROTTLE : 0) | DMU_TX_ASSIGN_NOWAIT);
 if (error) {
 zfs_dirent_unlock(dl);
 if (error == ERESTART) {
@@ -1298,7 +1303,8 @@ top:
 dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
 ZFS_SA_BASE_ATTR_SIZE);
 
-error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
+error = dmu_tx_assign(tx,
+(waited ? DMU_TX_ASSIGN_NOTHROTTLE : 0) | DMU_TX_ASSIGN_NOWAIT);
 if (error) {
 zfs_dirent_unlock(dl);
 if (error == ERESTART) {
@@ -1443,7 +1449,8 @@ top:
 zfs_sa_upgrade_txholds(tx, zp);
 zfs_sa_upgrade_txholds(tx, dzp);
 dmu_tx_mark_netfree(tx);
-error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
+error = dmu_tx_assign(tx,
+(waited ? DMU_TX_ASSIGN_NOTHROTTLE : 0) | DMU_TX_ASSIGN_NOWAIT);
 if (error) {
 rw_exit(&zp->z_parent_lock);
 rw_exit(&zp->z_name_lock);
@@ -1768,7 +1775,7 @@ zfs_setattr_dir(znode_t *dzp)
 else
 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
 
-err = dmu_tx_assign(tx, TXG_WAIT);
+err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (err)
 break;
 
@@ -2320,7 +2327,7 @@ top:
 
 zfs_sa_upgrade_txholds(tx, zp);
 
-err = dmu_tx_assign(tx, TXG_WAIT);
+err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (err)
 goto out;
 
@@ -2929,7 +2936,8 @@ top:
 
 zfs_sa_upgrade_txholds(tx, szp);
 dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
-error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
+error = dmu_tx_assign(tx,
+(waited ? DMU_TX_ASSIGN_NOTHROTTLE : 0) | DMU_TX_ASSIGN_NOWAIT);
 if (error) {
 if (zl != NULL)
 zfs_rename_unlock(&zl);
@@ -3131,7 +3139,8 @@ top:
 }
 if (fuid_dirtied)
 zfs_fuid_txhold(zfsvfs, tx);
-error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
+error = dmu_tx_assign(tx,
+(waited ? DMU_TX_ASSIGN_NOTHROTTLE : 0) | DMU_TX_ASSIGN_NOWAIT);
 if (error) {
 zfs_dirent_unlock(dl);
 if (error == ERESTART) {
@@ -3378,7 +3387,8 @@ top:
 
 zfs_sa_upgrade_txholds(tx, szp);
 zfs_sa_upgrade_txholds(tx, tdzp);
-error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
+error = dmu_tx_assign(tx,
+(waited ? DMU_TX_ASSIGN_NOTHROTTLE : 0) | DMU_TX_ASSIGN_NOWAIT);
 if (error) {
 zfs_dirent_unlock(dl);
 if (error == ERESTART) {
@@ -3590,7 +3600,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
 zfs_sa_upgrade_txholds(tx, zp);
 
-err = dmu_tx_assign(tx, TXG_NOWAIT);
+err = dmu_tx_assign(tx, DMU_TX_ASSIGN_NOWAIT);
 if (err != 0) {
 if (err == ERESTART)
 dmu_tx_wait(tx);
@@ -3687,7 +3697,7 @@ zfs_dirty_inode(struct inode *ip, int flags)
 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
 zfs_sa_upgrade_txholds(tx, zp);
 
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 dmu_tx_abort(tx);
 goto out;
@@ -3744,7 +3754,7 @@ zfs_inactive(struct inode *ip)
 
 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
 zfs_sa_upgrade_txholds(tx, zp);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 dmu_tx_abort(tx);
 } else {
 
@@ -1528,7 +1528,7 @@ zfs_extend(znode_t *zp, uint64_t end)
 newblksz = 0;
 }
 
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 dmu_tx_abort(tx);
 zfs_rangelock_exit(lr);
@@ -1714,7 +1714,7 @@ zfs_trunc(znode_t *zp, uint64_t end)
 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
 zfs_sa_upgrade_txholds(tx, zp);
 dmu_tx_mark_netfree(tx);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 dmu_tx_abort(tx);
 zfs_rangelock_exit(lr);
@@ -1785,7 +1785,7 @@ log:
 tx = dmu_tx_create(zfsvfs->z_os);
 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
 zfs_sa_upgrade_txholds(tx, zp);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 dmu_tx_abort(tx);
 goto out;
 
@@ -152,7 +152,7 @@ zvol_write(zv_request_t *zvr)
 dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, bytes);
 
 /* This will only fail for ENOSPC */
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 dmu_tx_abort(tx);
 break;
@@ -242,7 +242,7 @@ zvol_discard(zv_request_t *zvr)
 
 tx = dmu_tx_create(zv->zv_objset);
 dmu_tx_mark_netfree(tx);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error != 0) {
 dmu_tx_abort(tx);
 } else {
 
@@ -880,7 +880,7 @@ dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
 * reduction in space used.
 */
 dmu_tx_mark_netfree(tx);
-err = dmu_tx_assign(tx, TXG_WAIT);
+err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (err) {
 dmu_tx_abort(tx);
 return (err);
@@ -971,7 +971,7 @@ dmu_free_long_object(objset_t *os, uint64_t object)
 dmu_tx_hold_bonus(tx, object);
 dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
 dmu_tx_mark_netfree(tx);
-err = dmu_tx_assign(tx, TXG_WAIT);
+err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (err == 0) {
 if (err == 0)
 err = dmu_object_free(os, object, tx);
@@ -1648,7 +1648,7 @@ dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
 
 tx = dmu_tx_create(os);
 dmu_tx_hold_space(tx, zgd->zgd_db->db_size);
-if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
+if (dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT) != 0) {
 dmu_tx_abort(tx);
 /* Make zl_get_data do txg_waited_synced() */
 return (SET_ERROR(EIO));
 
@@ -2336,7 +2336,7 @@ dmu_objset_space_upgrade(objset_t *os)
 continue;
 tx = dmu_tx_create(os);
 dmu_tx_hold_bonus(tx, obj);
-objerr = dmu_tx_assign(tx, TXG_WAIT);
+objerr = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (objerr != 0) {
 dmu_buf_rele(db, FTAG);
 dmu_tx_abort(tx);
 
@@ -1733,7 +1733,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 tx = dmu_tx_create(rwa->os);
 dmu_tx_hold_bonus(tx, object_to_hold);
 dmu_tx_hold_write(tx, object_to_hold, 0, 0);
-err = dmu_tx_assign(tx, TXG_WAIT);
+err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (err != 0) {
 dmu_tx_abort(tx);
 return (err);
@@ -1925,7 +1925,7 @@ flush_write_batch_impl(struct receive_writer_arg *rwa)
 dmu_tx_hold_write_by_dnode(tx, dn, first_drrw->drr_offset,
 last_drrw->drr_offset - first_drrw->drr_offset +
 last_drrw->drr_logical_size);
-err = dmu_tx_assign(tx, TXG_WAIT);
+err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (err != 0) {
 dmu_tx_abort(tx);
 dnode_rele(dn, FTAG);
@@ -2151,7 +2151,7 @@ receive_write_embedded(struct receive_writer_arg *rwa,
 
 dmu_tx_hold_write(tx, drrwe->drr_object,
 drrwe->drr_offset, drrwe->drr_length);
-err = dmu_tx_assign(tx, TXG_WAIT);
+err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (err != 0) {
 dmu_tx_abort(tx);
 return (err);
@@ -2214,7 +2214,7 @@ receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
 
 dmu_tx_hold_spill(tx, db->db_object);
 
-err = dmu_tx_assign(tx, TXG_WAIT);
+err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (err != 0) {
 dmu_buf_rele(db, FTAG);
 dmu_buf_rele(db_spill, FTAG);
 
@@ -565,7 +565,7 @@ commit_rl_updates(objset_t *os, struct merge_data *md, uint64_t object,
 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(os->os_spa)->dp_mos_dir);
 dmu_tx_hold_space(tx, sizeof (struct redact_block_list_node));
 
-int err = dmu_tx_assign(tx, TXG_WAIT);
+int err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (err != 0) {
 ASSERT(spa_exiting_any(os->os_spa));
 dmu_tx_abort(tx);
 
@@ -855,7 +855,7 @@ dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
 * decreasing performance.
 */
 static int
-dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
+dmu_tx_try_assign(dmu_tx_t *tx, dmu_tx_assign_flag_t flags)
 {
 spa_t *spa = tx->tx_pool->dp_spa;
 
@@ -869,7 +869,7 @@ dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
 if (spa_suspended(spa)) {
 DMU_TX_STAT_BUMP(dmu_tx_suspended);
 
-if (txg_how & TXG_NOSUSPEND)
+if (flags & DMU_TX_ASSIGN_NOSUSPEND)
 return (SET_ERROR(EAGAIN));
 
 /*
@@ -888,11 +888,11 @@ dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
 * Otherwise, return EIO so that an error can get
 * propagated back to the VOP calls.
 *
-* Note that we always honor the txg_how flag regardless
-* of the failuremode setting.
+* Note that we always honor the DMU_TX_ASSIGN_WAIT flag
+* regardless of the failuremode setting.
 */
 if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
-!(txg_how & TXG_WAIT))
+!(flags & DMU_TX_ASSIGN_WAIT))
 return (SET_ERROR(EIO));
 
 return (SET_ERROR(ERESTART));
@@ -1011,29 +1011,28 @@ dmu_tx_unassign(dmu_tx_t *tx)
 static void dmu_tx_wait_flags(dmu_tx_t *, txg_wait_flag_t);
 
 /*
-* Assign tx to a transaction group; txg_how is a bitmask:
+* Assign tx to a transaction group; "flags" is a bitmask:
 *
-* If TXG_WAIT is set and the currently open txg is full, this function
-* will wait until there's a new txg. This should be used when no locks
-* are being held. With this bit set, this function will only fail if
+* If DMU_TX_ASSIGN_WAIT is set and the currently open txg is full, this
+* function will wait until there's a new txg. This should be used when no
+* locks are being held. With this bit set, this function will only fail if
 * we're truly out of space (or over quota).
 *
-* If TXG_WAIT is *not* set and we can't assign into the currently open
-* txg without blocking, this function will return immediately with
-* ERESTART. This should be used whenever locks are being held. On an
-* ERESTART error, the caller should drop all locks, call dmu_tx_wait(),
-* and try again.
+* If DMU_TX_ASSIGN_WAIT is *not* set and we can't assign into the currently
+* open txg without blocking, this function will return immediately with
+* ERESTART. This should be used whenever locks are being held. On an ERESTART
+* error, the caller should drop all locks, call dmu_tx_wait(), and try again.
 *
-* If TXG_NOTHROTTLE is set, this indicates that this tx should not be
+* If DMU_TX_ASSIGN_NOTHROTTLE is set, this indicates that this tx should not be
 * delayed due on the ZFS Write Throttle (see comments in dsl_pool.c for
 * details on the throttle). This is used by the VFS operations, after
 * they have already called dmu_tx_wait() (though most likely on a
 * different tx).
 *
-* If TXG_NOSUSPEND is set, this indicates that this request must return
-* EAGAIN if the pool becomes suspended while it is in progress. This
-* ensures that the request does not inadvertently cause conditions that
-* cannot be unwound.
+* If DMU_TX_ASSIGN_NOSUSPEND is set, this indicates that this request must
+* return EAGAIN if the pool becomes suspended while it is in progress. This
+* ensures that the request does not inadvertently cause conditions that cannot
+* be unwound.
 *
 * It is guaranteed that subsequent successful calls to dmu_tx_assign()
 * will assign the tx to monotonically increasing txgs. Of course this is
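Of the four flags, DMU_TX_ASSIGN_NOSUSPEND has the most distinctive error behavior: per the comment above, it converts an indefinite block on a suspended pool into EAGAIN. A sketch of a caller that must not hang, modeled on the spa_history_log_nvl() hunk later in this diff:

    err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT | DMU_TX_ASSIGN_NOSUSPEND);
    if (err != 0) {
        /* EAGAIN here means the pool suspended while we waited */
        dmu_tx_abort(tx);
        return (err);
    }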
@@ -1052,28 +1051,37 @@ static void dmu_tx_wait_flags(dmu_tx_t *, txg_wait_flag_t);
 * 1 <- dmu_tx_get_txg(T3)
 */
 int
-dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
+dmu_tx_assign(dmu_tx_t *tx, dmu_tx_assign_flag_t flags)
 {
 int err;
 
 ASSERT(tx->tx_txg == 0);
-ASSERT0(txg_how & ~(TXG_NOSUSPEND | TXG_WAIT | TXG_NOTHROTTLE));
+ASSERT0(flags & ~(DMU_TX_ASSIGN_NOSUSPEND | DMU_TX_ASSIGN_WAIT |
+DMU_TX_ASSIGN_NOTHROTTLE));
 ASSERT(!dsl_pool_sync_context(tx->tx_pool));
 
 /* If we might wait, we must not hold the config lock. */
-IMPLY((txg_how & TXG_WAIT), !dsl_pool_config_held(tx->tx_pool));
+IMPLY((flags & DMU_TX_ASSIGN_WAIT), !dsl_pool_config_held(tx->tx_pool));
 
-if ((txg_how & TXG_NOTHROTTLE))
+if ((flags & DMU_TX_ASSIGN_NOTHROTTLE))
 tx->tx_dirty_delayed = B_TRUE;
 
-while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
+while ((err = dmu_tx_try_assign(tx, flags)) != 0) {
 dmu_tx_unassign(tx);
 
-if (err != ERESTART || !(txg_how & TXG_WAIT))
+if (err != ERESTART || !(flags & DMU_TX_ASSIGN_WAIT))
 return (err);
 
-dmu_tx_wait_flags(tx,
-(txg_how & TXG_NOSUSPEND) ? TXG_WAIT_F_NOSUSPEND : 0);
+/*
+ * Wait until there's room in this txg, or until its been
+ * synced out and a new one is available. We pass the NOSUSPEND
+ * flag down if its set; if the pool suspends while we're
+ * waiting for the txg, this will return and we'll loop and end
+ * up back in dmu_tx_try_assign, which will deal with the
+ * suspension appropriately.
+ */
+dmu_tx_wait_flags(tx, (flags & DMU_TX_ASSIGN_NOSUSPEND)
+? TXG_WAIT_F_NOSUSPEND : 0);
 }
 
 txg_rele_to_quiesce(&tx->tx_txgh);
@@ -1082,7 +1090,7 @@ dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
 }
 
 static void
-dmu_tx_wait_flags(dmu_tx_t *tx, txg_wait_flag_t how)
+dmu_tx_wait_flags(dmu_tx_t *tx, txg_wait_flag_t flags)
 {
 spa_t *spa = tx->tx_pool->dp_spa;
 dsl_pool_t *dp = tx->tx_pool;
@@ -1131,7 +1139,7 @@ dmu_tx_wait_flags(dmu_tx_t *tx, txg_wait_flag_t how)
 * It's also possible the pool will be force exported, in
 * which case we'll try again and notice this fact, and exit.
 */
-txg_wait_synced_tx(dp, spa_last_synced_txg(spa) + 1, tx, how);
+txg_wait_synced_tx(dp, spa_last_synced_txg(spa) + 1, tx, flags);
 } else if (tx->tx_needassign_txh) {
 dnode_t *dn = tx->tx_needassign_txh->txh_dnode;
 
@@ -1149,7 +1157,7 @@ dmu_tx_wait_flags(dmu_tx_t *tx, txg_wait_flag_t how)
 * It's also possible the pool will be force exported, in
 * which case we'll try again and notice this fact, and exit.
 */
-txg_wait_synced_tx(dp, spa_last_synced_txg(spa) + 1, tx, how);
+txg_wait_synced_tx(dp, spa_last_synced_txg(spa) + 1, tx, flags);
 }
 
 spa_tx_assign_add_nsecs(spa, gethrtime() - before);
 
@@ -1106,7 +1106,7 @@ dsl_scan_restart_resilver(dsl_pool_t *dp, uint64_t txg)
 if (txg == 0) {
 dmu_tx_t *tx;
 tx = dmu_tx_create_dd(dp->dp_mos_dir);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error != 0) {
 ASSERT(spa_exiting_any(dp->dp_spa));
 dmu_tx_abort(tx);
 
@@ -57,7 +57,7 @@ dsl_sync_task_common(const char *pool, dsl_checkfunc_t *checkfunc,
 
 top:
 tx = dmu_tx_create_dd(dp->dp_mos_dir);
-err = dmu_tx_assign(tx, TXG_WAIT);
+err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (err != 0) {
 ASSERT(spa_exiting_any(spa));
 dmu_tx_abort(tx);
 
@@ -1538,7 +1538,7 @@ static void
 spa_unload_log_sm_flush_all(spa_t *spa)
 {
 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-int txerr = dmu_tx_assign(tx, TXG_WAIT);
+int txerr = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (txerr != 0) {
 ASSERT(spa_exiting_any(spa));
 dmu_tx_abort(tx);
@@ -2833,7 +2833,8 @@ spa_livelist_condense_cb(void *arg, zthr_t *t)
 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
 dmu_tx_mark_netfree(tx);
 dmu_tx_hold_space(tx, 1);
-err = dmu_tx_assign(tx, TXG_NOWAIT | TXG_NOTHROTTLE);
+err = dmu_tx_assign(tx,
+DMU_TX_ASSIGN_NOWAIT | DMU_TX_ASSIGN_NOTHROTTLE);
 if (err == 0) {
 /*
  * Prevent the condense zthr restarting before
@@ -7942,7 +7943,7 @@ spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
 /* finally, update the original pool's config */
 txg = spa_vdev_config_enter(spa);
 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error != 0)
 dmu_tx_abort(tx);
 for (c = 0; c < children; c++) {
 
@@ -385,7 +385,7 @@ spa_history_log_nvl(spa_t *spa, nvlist_t *nvl)
 }
 
 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-err = dmu_tx_assign(tx, TXG_WAIT | TXG_NOSUSPEND);
+err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT | DMU_TX_ASSIGN_NOSUSPEND);
 if (err) {
 dmu_tx_abort(tx);
 return (err);
@@ -556,7 +556,7 @@ spa_history_log_internal(spa_t *spa, const char *operation,
 /* create a tx if we didn't get one */
 if (tx == NULL) {
 htx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-if (dmu_tx_assign(htx, TXG_WAIT) != 0) {
+if (dmu_tx_assign(htx, DMU_TX_ASSIGN_WAIT) != 0) {
 dmu_tx_abort(htx);
 return;
 }
 
@@ -570,7 +570,7 @@ spa_condense_indirect_commit_entry(spa_t *spa,
 
 tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
 dmu_tx_hold_space(tx, sizeof (*vimep) + sizeof (count));
-if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
+if (dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT) != 0) {
 ASSERT(spa_exiting_any(spa));
 dmu_tx_abort(tx);
 return;
 
@@ -134,7 +134,7 @@ vdev_initialize_change_state(vdev_t *vd, vdev_initializing_state_t new_state)
 * export, which requires the namespace lock) to recover.
 */
 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
+VERIFY0(dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT));
 dsl_sync_task_nowait(spa_get_dsl(spa), vdev_initialize_zap_update_sync,
 guid, tx);
 
@@ -216,7 +216,7 @@ vdev_initialize_write(vdev_t *vd, uint64_t start, uint64_t size, abd_t *data)
 mutex_exit(&vd->vdev_initialize_io_lock);
 
 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
+VERIFY0(dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT));
 uint64_t txg = dmu_tx_get_txg(tx);
 
 spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
 
@@ -283,7 +283,7 @@ vdev_rebuild_initiate(vdev_t *vd)
 ASSERT(!vd->vdev_rebuilding);
 
 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-int err = dmu_tx_assign(tx, TXG_WAIT);
+int err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (err != 0) {
 ASSERT(spa_exiting_any(vd->vdev_spa));
 dmu_tx_abort(tx);
@@ -576,7 +576,7 @@ vdev_rebuild_range(vdev_rebuild_t *vr, uint64_t start, uint64_t size)
 mutex_exit(&vr->vr_io_lock);
 
 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-int err = dmu_tx_assign(tx, TXG_WAIT);
+int err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (err != 0) {
 ASSERT(spa_exiting_any(spa));
 dmu_tx_abort(tx);
@@ -914,7 +914,7 @@ vdev_rebuild_thread(void *arg)
 
 dsl_pool_t *dp = spa_get_dsl(spa);
 dmu_tx_t *tx = dmu_tx_create_dd(dp->dp_mos_dir);
-int txerr = dmu_tx_assign(tx, TXG_WAIT);
+int txerr = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 
 mutex_enter(&vd->vdev_rebuild_lock);
 if (txerr != 0) {
 
@@ -1535,7 +1535,7 @@ spa_vdev_remove_thread(void *arg)
 * If a tx can't be assigned, just punt and wait for
 * the next round. This must be an exiting spa.
 */
-if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
+if (dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT) != 0) {
 ASSERT(spa_exiting_any(spa));
 dmu_tx_abort(tx);
 goto done;
 
@@ -317,7 +317,7 @@ vdev_trim_change_state(vdev_t *vd, vdev_trim_state_t new_state,
 vd->vdev_trim_state = new_state;
 
 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-int txerr = dmu_tx_assign(tx, TXG_WAIT);
+int txerr = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (txerr != 0) {
 ASSERT(spa_exiting_any(spa));
 dmu_tx_abort(tx);
@@ -509,7 +509,7 @@ vdev_trim_range(trim_args_t *ta, uint64_t start, uint64_t size)
 mutex_exit(&vd->vdev_trim_io_lock);
 
 dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
-int err = dmu_tx_assign(tx, TXG_WAIT);
+int err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (err != 0) {
 ASSERT(spa_exiting_any(spa));
 mutex_enter(&vd->vdev_trim_io_lock);
 
@@ -338,7 +338,7 @@ zfs_set_userquota(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
 }
 if (fuid_dirtied)
 zfs_fuid_txhold(zfsvfs, tx);
-err = dmu_tx_assign(tx, TXG_WAIT);
+err = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (err) {
 dmu_tx_abort(tx);
 return (err);
 
@@ -756,7 +756,7 @@ top:
 
 zp->z_size = end;
 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 zrele(zp);
 if (error == ERESTART) {
 
@@ -248,7 +248,7 @@ zfs_sa_set_xattr(znode_t *zp)
 dmu_tx_hold_sa_create(tx, size);
 dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
 
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 dmu_tx_abort(tx);
 } else {
 
@@ -556,7 +556,7 @@ zfs_write(znode_t *zp, zfs_uio_t *uio, int ioflag, cred_t *cr)
 MIN(n, max_blksz));
 DB_DNODE_EXIT(db);
 zfs_sa_upgrade_txholds(tx, zp);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 dmu_tx_abort(tx);
 if (abuf != NULL)
 
@@ -695,7 +695,7 @@ zil_create(zilog_t *zilog)
 */
 if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
 tx = dmu_tx_create(zilog->zl_os);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error != 0) {
 ASSERT(dmu_objset_exiting(zilog->zl_os));
 dmu_tx_abort(tx);
@@ -768,7 +768,7 @@ zil_destroy(zilog_t *zilog, boolean_t keep_first)
 return;
 
 tx = dmu_tx_create(zilog->zl_os);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error != 0) {
 ASSERT(dmu_objset_exiting(zilog->zl_os));
 dmu_tx_abort(tx);
@@ -1530,9 +1530,10 @@ zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
 * Since we are not going to create any new dirty data, and we
 * can even help with clearing the existing dirty data, we
 * should not be subject to the dirty data based delays. We
-* use TXG_NOTHROTTLE to bypass the delay mechanism.
+* use DMU_TX_ASSIGN_NOTHROTTLE to bypass the delay mechanism.
 */
-if (dmu_tx_assign(tx, TXG_WAIT | TXG_NOTHROTTLE) != 0) {
+if (dmu_tx_assign(tx,
+DMU_TX_ASSIGN_WAIT | DMU_TX_ASSIGN_NOTHROTTLE) != 0) {
 ASSERT(dmu_objset_exiting(zilog->zl_os));
 dmu_tx_abort(tx);
 return (NULL);
@@ -2829,7 +2830,7 @@ zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw)
 {
 dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
 
-if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
+if (dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT) != 0) {
 ASSERT(dmu_objset_exiting(zilog->zl_os));
 dmu_tx_abort(tx);
 return;
 
@@ -281,7 +281,7 @@ zvol_update_volsize(uint64_t volsize, objset_t *os)
 tx = dmu_tx_create(os);
 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
 dmu_tx_mark_netfree(tx);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 dmu_tx_abort(tx);
 return (SET_ERROR(error));
@@ -435,7 +435,7 @@ zvol_set_volblocksize(const char *name, uint64_t volblocksize)
 
 tx = dmu_tx_create(zv->zv_objset);
 dmu_tx_hold_bonus(tx, ZVOL_OBJ);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 dmu_tx_abort(tx);
 } else {
@@ -473,7 +473,7 @@ zvol_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
 
 dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
 dmu_tx_mark_netfree(tx);
-int error = dmu_tx_assign(tx, TXG_WAIT);
+int error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error != 0) {
 dmu_tx_abort(tx);
 } else {
@@ -518,7 +518,7 @@ zvol_replay_write(void *arg1, void *arg2, boolean_t byteswap)
 
 tx = dmu_tx_create(os);
 dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
-error = dmu_tx_assign(tx, TXG_WAIT);
+error = dmu_tx_assign(tx, DMU_TX_ASSIGN_WAIT);
 if (error) {
 dmu_tx_abort(tx);
 } else {