Fix gcc missing parenthesis warnings
Gcc -Wall warn: 'missing parenthesis'

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
parent e75c13c353
commit c65aa5b2b9
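Every hunk below follows the same pattern: an assignment used directly as a truth value is wrapped in an extra pair of parentheses, which tells gcc the assignment is intentional and silences the -Wall (-Wparentheses) "suggest parentheses around assignment used as truth value" warning. A few hunks differ only in detail: one splits the assignment out of the if () entirely, two compare the result against NULL instead, and the U8_PUT_3BYTES_INTO_UTF32 macro gains explicit grouping around its shift and mask operands. A minimal standalone sketch of the warning and the idiom, using a hypothetical helper rather than code from this patch:

/* parens_example.c -- hypothetical illustration, not part of the patch.
 * Compile with: gcc -Wall -c parens_example.c
 */
#include <string.h>

int
count_slashes(const char *path)
{
	const char *p = path;
	int n = 0;

	/*
	 * Written as "while (p = strchr(p, '/'))", gcc -Wall warns along
	 * the lines of:
	 *   suggest parentheses around assignment used as truth value
	 *   [-Wparentheses]
	 * The doubled parentheses below mark the assignment as deliberate;
	 * an explicit "!= NULL" comparison works just as well.
	 */
	while ((p = strchr(p, '/'))) {
		n++;
		p++;
	}
	return (n);
}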
@@ -764,7 +764,7 @@ zfs_do_create(int argc, char **argv)
 	zfs_prop_t resv_prop;
 	char *strval;
 
-	if (p = strchr(argv[0], '/'))
+	if ((p = strchr(argv[0], '/')))
 		*p = '\0';
 	zpool_handle = zpool_open(g_zfs, argv[0]);
 	if (p != NULL)
@@ -4035,7 +4035,7 @@ zfs_do_diff(int argc, char **argv)
 	if (copy == NULL)
 		usage(B_FALSE);
 
-	if (atp = strchr(copy, '@'))
+	if ((atp = strchr(copy, '@')))
 		*atp = '\0';
 
 	if ((zhp = zfs_open(g_zfs, copy, ZFS_TYPE_FILESYSTEM)) == NULL)
@@ -131,7 +131,7 @@ pool_list_get(int argc, char **argv, zprop_list_t **proplist, int *err)
 		for (i = 0; i < argc; i++) {
 			zpool_handle_t *zhp;
 
-			if (zhp = zpool_open_canfail(g_zfs, argv[i])) {
+			if ((zhp = zpool_open_canfail(g_zfs, argv[i]))) {
 				if (add_pool(zhp, zlp) != 0)
 					*err = B_TRUE;
 			} else {
@@ -623,7 +623,7 @@ libzfs_mnttab_fini(libzfs_handle_t *hdl)
 	void *cookie = NULL;
 	mnttab_node_t *mtn;
 
-	while (mtn = avl_destroy_nodes(&hdl->libzfs_mnttab_cache, &cookie)) {
+	while ((mtn = avl_destroy_nodes(&hdl->libzfs_mnttab_cache, &cookie))) {
 		free(mtn->mtn_mt.mnt_special);
 		free(mtn->mtn_mt.mnt_mountp);
 		free(mtn->mtn_mt.mnt_fstype);
@@ -695,7 +695,7 @@ libzfs_mnttab_remove(libzfs_handle_t *hdl, const char *fsname)
 	mnttab_node_t *ret;
 
 	find.mtn_mt.mnt_special = (char *)fsname;
-	if (ret = avl_find(&hdl->libzfs_mnttab_cache, (void *)&find, NULL)) {
+	if ((ret = avl_find(&hdl->libzfs_mnttab_cache, (void *)&find, NULL))) {
 		avl_remove(&hdl->libzfs_mnttab_cache, ret);
 		free(ret->mtn_mt.mnt_special);
 		free(ret->mtn_mt.mnt_mountp);
@@ -2734,7 +2734,7 @@ create_parents(libzfs_handle_t *hdl, char *target, int prefixlen)
 	 * up to the prefixlen-long one.
 	 */
 	for (cp = target + prefixlen + 1;
-	    cp = strchr(cp, '/'); *cp = '/', cp++) {
+	    (cp = strchr(cp, '/')); *cp = '/', cp++) {
 		char *logstr;
 
 		*cp = '\0';
@@ -346,7 +346,7 @@ write_inuse_diffs(FILE *fp, differ_info_t *di, dmu_diff_record_t *dr)
 	int err;
 
 	for (o = dr->ddr_first; o <= dr->ddr_last; o++) {
-		if (err = write_inuse_diffs_one(fp, di, o))
+		if ((err = write_inuse_diffs_one(fp, di, o)))
 			return (err);
 	}
 	return (0);
@@ -1297,7 +1297,7 @@ zfs_send(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap,
 	if (flags.dedup) {
 		featureflags |= (DMU_BACKUP_FEATURE_DEDUP |
 		    DMU_BACKUP_FEATURE_DEDUPPROPS);
-		if (err = pipe(pipefd)) {
+		if ((err = pipe(pipefd))) {
 			zfs_error_aux(zhp->zfs_hdl, strerror(errno));
 			return (zfs_error(zhp->zfs_hdl, EZFS_PIPEFAILED,
 			    errbuf));
@@ -1305,7 +1305,7 @@ zfs_send(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap,
 		dda.outputfd = outfd;
 		dda.inputfd = pipefd[1];
 		dda.dedup_hdl = zhp->zfs_hdl;
-		if (err = pthread_create(&tid, NULL, cksummer, &dda)) {
+		if ((err = pthread_create(&tid, NULL, cksummer, &dda))) {
 			(void) close(pipefd[0]);
 			(void) close(pipefd[1]);
 			zfs_error_aux(zhp->zfs_hdl, strerror(errno));
@@ -2382,7 +2382,7 @@ nvlist_xpack(nvlist_t *nvl, char **bufp, size_t *buflen, int encoding,
 	 */
 	nv_priv_init(&nvpriv, nva, 0);
 
-	if (err = nvlist_size(nvl, &alloc_size, encoding))
+	if ((err = nvlist_size(nvl, &alloc_size, encoding)))
 		return (err);
 
 	if ((buf = nv_mem_zalloc(&nvpriv, alloc_size)) == NULL)
@@ -144,9 +144,10 @@
 #define	U8_16BIT_TABLE_INDICATOR	(0x8000U)
 
 /* The following are some convenience macros. */
-#define	U8_PUT_3BYTES_INTO_UTF32(u, b1, b2, b3) \
-	(u) = ((uint32_t)(b1) & 0x0F) << 12 | ((uint32_t)(b2) & 0x3F) << 6 | \
-	    (uint32_t)(b3) & 0x3F;
+#define	U8_PUT_3BYTES_INTO_UTF32(u, b1, b2, b3) \
+	(u) = ((((uint32_t)(b1) & 0x0F) << 12) | \
+	    (((uint32_t)(b2) & 0x3F) << 6) | \
+	    ((uint32_t)(b3) & 0x3F));
 
 #define	U8_SIMPLE_SWAP(a, b, t) \
 	(t) = (a); \
@@ -182,8 +182,8 @@ zfs_deleg_verify_nvlist(nvlist_t *nvp)
 			    nvpair_name(perm_name));
 			if (error)
 				return (-1);
-		} while (perm_name = nvlist_next_nvpair(perms, perm_name));
-	} while (who = nvlist_next_nvpair(nvp, who));
+		} while ((perm_name = nvlist_next_nvpair(perms, perm_name)));
+	} while ((who = nvlist_next_nvpair(nvp, who)));
 	return (0);
 }
 
@@ -3414,7 +3414,7 @@ arc_tempreserve_space(uint64_t reserve, uint64_t txg)
 	 * in order to compress/encrypt/etc the data. We therefor need to
 	 * make sure that there is sufficient available memory for this.
 	 */
-	if (error = arc_memory_throttle(reserve, anon_size, txg))
+	if ((error = arc_memory_throttle(reserve, anon_size, txg)))
 		return (error);
 
 	/*
@@ -58,7 +58,7 @@ bplist_iterate(bplist_t *bpl, bplist_itor_t *func, void *arg, dmu_tx_t *tx)
 	bplist_entry_t *bpe;
 
 	mutex_enter(&bpl->bpl_lock);
-	while (bpe = list_head(&bpl->bpl_list)) {
+	while ((bpe = list_head(&bpl->bpl_list))) {
 		list_remove(&bpl->bpl_list, bpe);
 		mutex_exit(&bpl->bpl_lock);
 		func(arg, &bpe->bpe_blk, tx);
@@ -1810,7 +1810,7 @@ dbuf_prefetch(dnode_t *dn, uint64_t blkid)
 		return;
 
 	/* dbuf_find() returns with db_mtx held */
-	if (db = dbuf_find(dn, 0, blkid)) {
+	if ((db = dbuf_find(dn, 0, blkid))) {
 		/*
 		 * This dbuf is already in the cache. We assume that
 		 * it is already CACHED, or else about to be either
@@ -2395,7 +2395,7 @@ dbuf_sync_list(list_t *list, dmu_tx_t *tx)
 {
 	dbuf_dirty_record_t *dr;
 
-	while (dr = list_head(list)) {
+	while ((dr = list_head(list))) {
 		if (dr->dr_zio != NULL) {
 			/*
 			 * If we find an already initialized zio then we
@@ -1016,7 +1016,7 @@ dmu_objset_sync_dnodes(list_t *list, list_t *newlist, dmu_tx_t *tx)
 {
 	dnode_t *dn;
 
-	while (dn = list_head(list)) {
+	while ((dn = list_head(list))) {
 		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
 		ASSERT(dn->dn_dbuf->db_data_pending);
 		/*
@@ -1157,7 +1157,7 @@ dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
 	dmu_objset_sync_dnodes(&os->os_dirty_dnodes[txgoff], newlist, tx);
 
 	list = &DMU_META_DNODE(os)->dn_dirty_records[txgoff];
-	while (dr = list_head(list)) {
+	while ((dr = list_head(list)) != NULL) {
 		ASSERT(dr->dr_dbuf->db_level == 0);
 		list_remove(list, dr);
 		if (dr->dr_zio)
@@ -1228,7 +1228,7 @@ dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx)
 
 	ASSERT(list_head(list) == NULL || dmu_objset_userused_enabled(os));
 
-	while (dn = list_head(list)) {
+	while ((dn = list_head(list)) != NULL) {
 		int flags;
 		ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
 		ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
@@ -1201,8 +1201,9 @@ restore_write_byref(struct restorearg *ra, objset_t *os,
 		ref_os = os;
 	}
 
-	if (err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
-	    drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH))
+	err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
+	    drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH);
+	if (err)
 		return (err);
 
 	tx = dmu_tx_create(os);
@@ -1125,7 +1125,7 @@ dmu_tx_commit(dmu_tx_t *tx)
 
 	ASSERT(tx->tx_txg != 0);
 
-	while (txh = list_head(&tx->tx_holds)) {
+	while ((txh = list_head(&tx->tx_holds))) {
 		dnode_t *dn = txh->txh_dnode;
 
 		list_remove(&tx->tx_holds, txh);
@@ -1173,7 +1173,7 @@ dmu_tx_abort(dmu_tx_t *tx)
 
 	ASSERT(tx->tx_txg == 0);
 
-	while (txh = list_head(&tx->tx_holds)) {
+	while ((txh = list_head(&tx->tx_holds))) {
 		dnode_t *dn = txh->txh_dnode;
 
 		list_remove(&tx->tx_holds, txh);
@@ -1227,7 +1227,7 @@ dmu_tx_do_callbacks(list_t *cb_list, int error)
 {
 	dmu_tx_callback_t *dcb;
 
-	while (dcb = list_head(cb_list)) {
+	while ((dcb = list_head(cb_list))) {
 		list_remove(cb_list, dcb);
 		dcb->dcb_func(dcb->dcb_data, error);
 		kmem_free(dcb, sizeof (dmu_tx_callback_t));
@@ -665,7 +665,7 @@ dmu_zfetch(zfetch_t *zf, uint64_t offset, uint64_t size, int prefetched)
 		ZFETCHSTAT_BUMP(zfetchstat_hits);
 	} else {
 		ZFETCHSTAT_BUMP(zfetchstat_misses);
-		if (fetched = dmu_zfetch_colinear(zf, &zst)) {
+		if ((fetched = dmu_zfetch_colinear(zf, &zst))) {
 			ZFETCHSTAT_BUMP(zfetchstat_colinear_hits);
 		} else {
 			ZFETCHSTAT_BUMP(zfetchstat_colinear_misses);
@@ -1079,8 +1079,8 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag,
 			zrl_init(&dnh[i].dnh_zrlock);
 			dnh[i].dnh_dnode = NULL;
 		}
-		if (winner = dmu_buf_set_user(&db->db, children_dnodes, NULL,
-		    dnode_buf_pageout)) {
+		if ((winner = dmu_buf_set_user(&db->db, children_dnodes, NULL,
+		    dnode_buf_pageout))) {
 			kmem_free(children_dnodes, sizeof (dnode_children_t) +
 			    (epb - 1) * sizeof (dnode_handle_t));
 			children_dnodes = winner;
@@ -1625,7 +1625,7 @@ dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
 			int shift = epbs + dn->dn_datablkshift;
 
 			first = blkid >> epbs;
-			if (db = dbuf_hold_level(dn, 1, first, FTAG)) {
+			if ((db = dbuf_hold_level(dn, 1, first, FTAG))) {
 				dbuf_will_dirty(db, tx);
 				dbuf_rele(db, FTAG);
 			}
@@ -435,7 +435,7 @@ dnode_undirty_dbufs(list_t *list)
 {
 	dbuf_dirty_record_t *dr;
 
-	while (dr = list_head(list)) {
+	while ((dr = list_head(list))) {
 		dmu_buf_impl_t *db = dr->dr_dbuf;
 		uint64_t txg = dr->dr_txg;
 
@@ -635,7 +635,7 @@ dnode_sync(dnode_t *dn, dmu_tx_t *tx)
 	}
 
 	/* process all the "freed" ranges in the file */
-	while (rp = avl_last(&dn->dn_ranges[txgoff])) {
+	while ((rp = avl_last(&dn->dn_ranges[txgoff]))) {
 		dnode_sync_free_range(dn, rp->fr_blkid, rp->fr_nblks, tx);
 		/* grab the mutex so we don't race with dnode_block_freed() */
 		mutex_enter(&dn->dn_mtx);
@@ -96,13 +96,13 @@ dsl_deleg_can_allow(char *ddname, nvlist_t *nvp, cred_t *cr)
 	if ((error = dsl_deleg_access(ddname, ZFS_DELEG_PERM_ALLOW, cr)) != 0)
 		return (error);
 
-	while (whopair = nvlist_next_nvpair(nvp, whopair)) {
+	while ((whopair = nvlist_next_nvpair(nvp, whopair))) {
 		nvlist_t *perms;
 		nvpair_t *permpair = NULL;
 
 		VERIFY(nvpair_value_nvlist(whopair, &perms) == 0);
 
-		while (permpair = nvlist_next_nvpair(perms, permpair)) {
+		while ((permpair = nvlist_next_nvpair(perms, permpair))) {
 			const char *perm = nvpair_name(permpair);
 
 			if (strcmp(perm, ZFS_DELEG_PERM_ALLOW) == 0)
@@ -133,7 +133,7 @@ dsl_deleg_can_unallow(char *ddname, nvlist_t *nvp, cred_t *cr)
 	(void) snprintf(idstr, sizeof (idstr), "%lld",
 	    (longlong_t)crgetuid(cr));
 
-	while (whopair = nvlist_next_nvpair(nvp, whopair)) {
+	while ((whopair = nvlist_next_nvpair(nvp, whopair))) {
 		zfs_deleg_who_type_t type = nvpair_name(whopair)[0];
 
 		if (type != ZFS_DELEG_USER &&
@@ -161,7 +161,7 @@ dsl_deleg_set_sync(void *arg1, void *arg2, dmu_tx_t *tx)
 		    DMU_OT_DSL_PERMS, DMU_OT_NONE, 0, tx);
 	}
 
-	while (whopair = nvlist_next_nvpair(nvp, whopair)) {
+	while ((whopair = nvlist_next_nvpair(nvp, whopair))) {
 		const char *whokey = nvpair_name(whopair);
 		nvlist_t *perms;
 		nvpair_t *permpair = NULL;
@@ -176,7 +176,7 @@ dsl_deleg_set_sync(void *arg1, void *arg2, dmu_tx_t *tx)
 			    whokey, 8, 1, &jumpobj, tx) == 0);
 		}
 
-		while (permpair = nvlist_next_nvpair(perms, permpair)) {
+		while ((permpair = nvlist_next_nvpair(perms, permpair))) {
 			const char *perm = nvpair_name(permpair);
 			uint64_t n = 0;
 
@@ -202,7 +202,7 @@ dsl_deleg_unset_sync(void *arg1, void *arg2, dmu_tx_t *tx)
 	if (zapobj == 0)
 		return;
 
-	while (whopair = nvlist_next_nvpair(nvp, whopair)) {
+	while ((whopair = nvlist_next_nvpair(nvp, whopair))) {
 		const char *whokey = nvpair_name(whopair);
 		nvlist_t *perms;
 		nvpair_t *permpair = NULL;
@@ -224,7 +224,7 @@ dsl_deleg_unset_sync(void *arg1, void *arg2, dmu_tx_t *tx)
 			if (zap_lookup(mos, zapobj, whokey, 8, 1, &jumpobj) != 0)
 				continue;
 
-			while (permpair = nvlist_next_nvpair(perms, permpair)) {
+			while ((permpair = nvlist_next_nvpair(perms, permpair))) {
 				const char *perm = nvpair_name(permpair);
 				uint64_t n = 0;
 
@@ -261,7 +261,7 @@ dsl_deleg_set(const char *ddname, nvlist_t *nvp, boolean_t unset)
 		return (ENOTSUP);
 	}
 
-	while (whopair = nvlist_next_nvpair(nvp, whopair))
+	while ((whopair = nvlist_next_nvpair(nvp, whopair)))
 		blocks_modified++;
 
 	error = dsl_sync_task_do(dd->dd_pool, NULL,
@@ -881,7 +881,7 @@ dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
 	if (tr_cookie == NULL)
 		return;
 
-	while (tr = list_head(tr_list)) {
+	while ((tr = list_head(tr_list))) {
 		if (tr->tr_dp) {
 			dsl_pool_tempreserve_clear(tr->tr_dp, tr->tr_size, tx);
 		} else if (tr->tr_ds) {
@@ -1285,8 +1285,8 @@ dsl_dir_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
 		if (closest_common_ancestor(dd, ra->newparent) == dd)
 			return (EINVAL);
 
-		if (err = dsl_dir_transfer_possible(dd->dd_parent,
-		    ra->newparent, myspace))
+		if ((err = dsl_dir_transfer_possible(dd->dd_parent,
+		    ra->newparent, myspace)))
 			return (err);
 	}
 
@@ -322,7 +322,7 @@ dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
 	start = gethrtime();
 
 	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
-	while (ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) {
+	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg))) {
 		/*
 		 * We must not sync any non-MOS datasets twice, because
 		 * we may have taken a snapshot of them. However, we
@@ -350,7 +350,7 @@ dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
 	 * whose ds_bp will be rewritten when we do this 2nd sync.
 	 */
 	zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
-	while (ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) {
+	while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg))) {
 		ASSERT(list_link_active(&ds->ds_synced_link));
 		dmu_buf_rele(ds->ds_dbuf, ds);
 		dsl_dataset_sync(ds, zio, tx);
@@ -367,7 +367,7 @@ dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
 		    deadlist_enqueue_cb, &ds->ds_deadlist, tx);
 	}
 
-	while (dstg = txg_list_remove(&dp->dp_sync_tasks, txg)) {
+	while ((dstg = txg_list_remove(&dp->dp_sync_tasks, txg))) {
 		/*
 		 * No more sync tasks should have been added while we
 		 * were syncing.
@@ -378,7 +378,7 @@ dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
 	DTRACE_PROBE(pool_sync__3task);
 
 	start = gethrtime();
-	while (dd = txg_list_remove(&dp->dp_dirty_dirs, txg))
+	while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)))
 		dsl_dir_sync(dd, tx);
 	write_time += gethrtime() - start;
 
@@ -448,7 +448,7 @@ dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
 	dsl_dataset_t *ds;
 	objset_t *os;
 
-	while (ds = list_head(&dp->dp_synced_datasets)) {
+	while ((ds = list_head(&dp->dp_synced_datasets))) {
 		list_remove(&dp->dp_synced_datasets, ds);
 		os = ds->ds_objset;
 		zil_clean(os->os_zil, txg);
@@ -830,7 +830,7 @@ dsl_props_set(const char *dsname, zprop_source_t source, nvlist_t *props)
 	dsl_props_arg_t pa;
 	int err;
 
-	if (err = dsl_dataset_hold(dsname, FTAG, &ds))
+	if ((err = dsl_dataset_hold(dsname, FTAG, &ds)))
 		return (err);
 	/*
 	 * Do these checks before the syncfunc, since it can't fail.
@@ -147,7 +147,7 @@ dsl_sync_task_group_destroy(dsl_sync_task_group_t *dstg)
 {
 	dsl_sync_task_t *dst;
 
-	while (dst = list_head(&dstg->dstg_tasks)) {
+	while ((dst = list_head(&dstg->dstg_tasks))) {
 		list_remove(&dstg->dstg_tasks, dst);
 		kmem_free(dst, sizeof (dsl_sync_task_t));
 	}
@@ -72,13 +72,13 @@ refcount_destroy_many(refcount_t *rc, uint64_t number)
 	reference_t *ref;
 
 	ASSERT(rc->rc_count == number);
-	while (ref = list_head(&rc->rc_list)) {
+	while ((ref = list_head(&rc->rc_list))) {
 		list_remove(&rc->rc_list, ref);
 		kmem_cache_free(reference_cache, ref);
 	}
 	list_destroy(&rc->rc_list);
 
-	while (ref = list_head(&rc->rc_removed)) {
+	while ((ref = list_head(&rc->rc_removed))) {
 		list_remove(&rc->rc_removed, ref);
 		kmem_cache_free(reference_history_cache, ref->ref_removed);
 		kmem_cache_free(reference_cache, ref);
@@ -1105,16 +1105,16 @@ sa_tear_down(objset_t *os)
 	sa_free_attr_table(sa);
 
 	cookie = NULL;
-	while (layout = avl_destroy_nodes(&sa->sa_layout_hash_tree, &cookie)) {
+	while ((layout = avl_destroy_nodes(&sa->sa_layout_hash_tree, &cookie))){
 		sa_idx_tab_t *tab;
-		while (tab = list_head(&layout->lot_idx_tab)) {
+		while ((tab = list_head(&layout->lot_idx_tab))) {
 			ASSERT(refcount_count(&tab->sa_refcount));
 			sa_idx_tab_rele(os, tab);
 		}
 	}
 
 	cookie = NULL;
-	while (layout = avl_destroy_nodes(&sa->sa_layout_num_tree, &cookie)) {
+	while ((layout = avl_destroy_nodes(&sa->sa_layout_num_tree, &cookie))){
 		kmem_free(layout->lot_attrs,
 		    sizeof (sa_attr_type_t) * layout->lot_attr_count);
 		kmem_free(layout, sizeof (sa_lot_t));
@@ -1387,7 +1387,7 @@ sa_handle_get(objset_t *objset, uint64_t objid, void *userp,
 	dmu_buf_t *db;
 	int error;
 
-	if (error = dmu_bonus_hold(objset, objid, NULL, &db))
+	if ((error = dmu_bonus_hold(objset, objid, NULL, &db)))
 		return (error);
 
 	return (sa_handle_get_from_db(objset, db, userp, hdl_type,
@@ -271,8 +271,8 @@ spa_prop_get(spa_t *spa, nvlist_t **nvp)
 
 			dp = spa_get_dsl(spa);
 			rw_enter(&dp->dp_config_rwlock, RW_READER);
-			if (err = dsl_dataset_hold_obj(dp,
-			    za.za_first_integer, FTAG, &ds)) {
+			if ((err = dsl_dataset_hold_obj(dp,
+			    za.za_first_integer, FTAG, &ds))) {
 				rw_exit(&dp->dp_config_rwlock);
 				break;
 			}
@@ -398,7 +398,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
 				break;
 			}
 
-			if (error = dmu_objset_hold(strval, FTAG, &os))
+			if ((error = dmu_objset_hold(strval,FTAG,&os)))
 				break;
 
 			/* Must be ZPL and not gzip compressed. */
@@ -2171,7 +2171,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
 	 * to start pushing transactions.
 	 */
 	if (state != SPA_LOAD_TRYIMPORT) {
-		if (error = spa_load_verify(spa))
+		if ((error = spa_load_verify(spa)))
 			return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
 			    error));
 	}
@@ -5596,7 +5596,7 @@ spa_sync(spa_t *spa, uint64_t txg)
 		ddt_sync(spa, txg);
 		dsl_scan_sync(dp, tx);
 
-		while (vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))
+		while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)))
 			vdev_sync(vd, txg);
 
 		if (pass == 1)
@@ -5678,7 +5678,7 @@ spa_sync(spa_t *spa, uint64_t txg)
 	/*
 	 * Update usable space statistics.
 	 */
-	while (vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
+	while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))))
 		vdev_sync_done(vd, txg);
 
 	spa_update_dspace(spa);
@@ -2034,7 +2034,7 @@ vdev_sync_done(vdev_t *vd, uint64_t txg)
 
 	ASSERT(!vd->vdev_ishole);
 
-	while (msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg)))
+	while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))))
 		metaslab_sync_done(msp, txg);
 
 	if (reassess)
@@ -350,7 +350,7 @@ mze_destroy(zap_t *zap)
 	mzap_ent_t *mze;
 	void *avlcookie = NULL;
 
-	while (mze = avl_destroy_nodes(&zap->zap_m.zap_avl, &avlcookie))
+	while ((mze = avl_destroy_nodes(&zap->zap_m.zap_avl, &avlcookie)))
 		kmem_free(mze, sizeof (mzap_ent_t));
 	avl_destroy(&zap->zap_m.zap_avl);
 }
@@ -168,12 +168,12 @@ zfs_fuid_table_destroy(avl_tree_t *idx_tree, avl_tree_t *domain_tree)
 	void *cookie;
 
 	cookie = NULL;
-	while (domnode = avl_destroy_nodes(domain_tree, &cookie))
+	while ((domnode = avl_destroy_nodes(domain_tree, &cookie)))
 		ksiddomain_rele(domnode->f_ksid);
 
 	avl_destroy(domain_tree);
 	cookie = NULL;
-	while (domnode = avl_destroy_nodes(idx_tree, &cookie))
+	while ((domnode = avl_destroy_nodes(idx_tree, &cookie)))
 		kmem_free(domnode, sizeof (fuid_domain_t));
 	avl_destroy(idx_tree);
 }
@@ -1164,8 +1164,8 @@ zfs_ioc_pool_create(zfs_cmd_t *zc)
 	nvlist_t *zplprops = NULL;
 	char *buf;
 
-	if (error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
-	    zc->zc_iflags, &config))
+	if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
+	    zc->zc_iflags, &config)))
 		return (error);
 
 	if (zc->zc_nvlist_src_size != 0 && (error =
@@ -1454,7 +1454,7 @@ zfs_ioc_dsobj_to_dsname(zfs_cmd_t *zc)
 {
 	int error;
 
-	if (error = dsl_dsobj_to_dsname(zc->zc_name, zc->zc_obj, zc->zc_value))
+	if ((error = dsl_dsobj_to_dsname(zc->zc_name,zc->zc_obj,zc->zc_value)))
 		return (error);
 
 	return (0);
@@ -1670,8 +1670,8 @@ zfs_ioc_vdev_split(zfs_cmd_t *zc)
 	if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0)
 		return (error);
 
-	if (error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
-	    zc->zc_iflags, &config)) {
+	if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
+	    zc->zc_iflags, &config))) {
 		spa_close(spa, FTAG);
 		return (error);
 	}
@@ -1773,7 +1773,7 @@ zfs_ioc_objset_stats(zfs_cmd_t *zc)
 	objset_t *os = NULL;
 	int error;
 
-	if (error = dmu_objset_hold(zc->zc_name, FTAG, &os))
+	if ((error = dmu_objset_hold(zc->zc_name, FTAG, &os)))
 		return (error);
 
 	error = zfs_ioc_objset_stats_impl(zc, os);
@@ -1797,13 +1797,13 @@ zfs_ioc_objset_stats(zfs_cmd_t *zc)
  * local property values.
  */
 static int
-zfs_ioc_objset_recvd_props(zfs_cmd_t *zc)
+zfs_ioc_objset_recvd_props(struct file *filp, zfs_cmd_t *zc)
 {
 	objset_t *os = NULL;
 	int error;
 	nvlist_t *nv;
 
-	if (error = dmu_objset_hold(zc->zc_name, FTAG, &os))
+	if ((error = dmu_objset_hold(zc->zc_name, FTAG, &os)))
 		return (error);
 
 	/*
@@ -1858,7 +1858,7 @@ zfs_ioc_objset_zplprops(zfs_cmd_t *zc)
 	int err;
 
 	/* XXX reading without owning */
-	if (err = dmu_objset_hold(zc->zc_name, FTAG, &os))
+	if ((err = dmu_objset_hold(zc->zc_name, FTAG, &os)))
 		return (err);
 
 	dmu_objset_fast_stat(os, &zc->zc_objset_stats);
@@ -1926,7 +1926,7 @@ zfs_ioc_dataset_list_next(zfs_cmd_t *zc)
 	size_t orig_len = strlen(zc->zc_name);
 
 top:
-	if (error = dmu_objset_hold(zc->zc_name, FTAG, &os)) {
+	if ((error = dmu_objset_hold(zc->zc_name, FTAG, &os))) {
 		if (error == ENOENT)
 			error = ESRCH;
 		return (error);
@@ -2368,8 +2368,8 @@ zfs_check_userprops(char *fsname, nvlist_t *nvl)
 		    nvpair_type(pair) != DATA_TYPE_STRING)
 			return (EINVAL);
 
-		if (error = zfs_secpolicy_write_perms(fsname,
-		    ZFS_DELEG_PERM_USERPROP, CRED()))
+		if ((error = zfs_secpolicy_write_perms(fsname,
+		    ZFS_DELEG_PERM_USERPROP, CRED())))
 			return (error);
 
 		if (strlen(propname) >= ZAP_MAXNAMELEN)
@@ -2552,8 +2552,8 @@ zfs_ioc_pool_set_props(zfs_cmd_t *zc)
 	int error;
 	nvpair_t *pair;
 
-	if (error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
-	    zc->zc_iflags, &props))
+	if ((error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
+	    zc->zc_iflags, &props)))
 		return (error);
 
 	/*
@@ -3274,8 +3274,8 @@ zfs_check_settable(const char *dsname, nvpair_t *pair, cred_t *cr)
 
 	if (prop == ZPROP_INVAL) {
 		if (zfs_prop_user(propname)) {
-			if (err = zfs_secpolicy_write_perms(dsname,
-			    ZFS_DELEG_PERM_USERPROP, cr))
+			if ((err = zfs_secpolicy_write_perms(dsname,
+			    ZFS_DELEG_PERM_USERPROP, cr)))
 				return (err);
 			return (0);
 		}
@@ -3298,7 +3298,7 @@ zfs_check_settable(const char *dsname, nvpair_t *pair, cred_t *cr)
 				return (EINVAL);
 			}
 
-			if (err = zfs_secpolicy_write_perms(dsname, perm, cr))
+			if ((err = zfs_secpolicy_write_perms(dsname, perm, cr)))
 				return (err);
 			return (0);
 		}
@@ -1421,7 +1421,7 @@ zil_commit_writer(zilog_t *zilog)
 	}
 
 	DTRACE_PROBE1(zil__cw1, zilog_t *, zilog);
-	while (itx = list_head(&zilog->zl_itx_commit_list)) {
+	while ((itx = list_head(&zilog->zl_itx_commit_list))) {
 		txg = itx->itx_lr.lrc_txg;
 		ASSERT(txg);
 