Remove atomics from zh_refcount

It is protected by z_hold_locks, so we do not need more serialization,
simple integer math should be fine.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Ryan Moeller <ryan@iXsystems.com>
Reviewed-by: Richard Yao <richard.yao@alumni.stonybrook.edu>
Signed-off-by: Alexander Motin <mav@FreeBSD.org>
Closes #14196
This commit is contained in:
Alexander Motin 2022-11-28 14:36:53 -05:00 committed by Brian Behlendorf
parent 82e3117095
commit 2098a00318
2 changed files with 8 additions and 10 deletions

View File

@@ -217,9 +217,9 @@ typedef struct znode {
typedef struct znode_hold { typedef struct znode_hold {
uint64_t zh_obj; /* object id */ uint64_t zh_obj; /* object id */
kmutex_t zh_lock; /* lock serializing object access */
avl_node_t zh_node; /* avl tree linkage */ avl_node_t zh_node; /* avl tree linkage */
zfs_refcount_t zh_refcount; /* active consumer reference count */ kmutex_t zh_lock; /* lock serializing object access */
int zh_refcount; /* active consumer reference count */
} znode_hold_t; } znode_hold_t;
static inline uint64_t static inline uint64_t

View File

@@ -162,8 +162,7 @@ zfs_znode_hold_cache_constructor(void *buf, void *arg, int kmflags)
znode_hold_t *zh = buf; znode_hold_t *zh = buf;
mutex_init(&zh->zh_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&zh->zh_lock, NULL, MUTEX_DEFAULT, NULL);
zfs_refcount_create(&zh->zh_refcount); zh->zh_refcount = 0;
zh->zh_obj = ZFS_NO_OBJECT;
return (0); return (0);
} }
@@ -174,7 +173,6 @@ zfs_znode_hold_cache_destructor(void *buf, void *arg)
znode_hold_t *zh = buf; znode_hold_t *zh = buf;
mutex_destroy(&zh->zh_lock); mutex_destroy(&zh->zh_lock);
zfs_refcount_destroy(&zh->zh_refcount);
} }
void void
@@ -273,26 +271,26 @@ zfs_znode_hold_enter(zfsvfs_t *zfsvfs, uint64_t obj)
boolean_t found = B_FALSE; boolean_t found = B_FALSE;
zh_new = kmem_cache_alloc(znode_hold_cache, KM_SLEEP); zh_new = kmem_cache_alloc(znode_hold_cache, KM_SLEEP);
zh_new->zh_obj = obj;
search.zh_obj = obj; search.zh_obj = obj;
mutex_enter(&zfsvfs->z_hold_locks[i]); mutex_enter(&zfsvfs->z_hold_locks[i]);
zh = avl_find(&zfsvfs->z_hold_trees[i], &search, NULL); zh = avl_find(&zfsvfs->z_hold_trees[i], &search, NULL);
if (likely(zh == NULL)) { if (likely(zh == NULL)) {
zh = zh_new; zh = zh_new;
zh->zh_obj = obj;
avl_add(&zfsvfs->z_hold_trees[i], zh); avl_add(&zfsvfs->z_hold_trees[i], zh);
} else { } else {
ASSERT3U(zh->zh_obj, ==, obj); ASSERT3U(zh->zh_obj, ==, obj);
found = B_TRUE; found = B_TRUE;
} }
zfs_refcount_add(&zh->zh_refcount, NULL); zh->zh_refcount++;
ASSERT3S(zh->zh_refcount, >, 0);
mutex_exit(&zfsvfs->z_hold_locks[i]); mutex_exit(&zfsvfs->z_hold_locks[i]);
if (found == B_TRUE) if (found == B_TRUE)
kmem_cache_free(znode_hold_cache, zh_new); kmem_cache_free(znode_hold_cache, zh_new);
ASSERT(MUTEX_NOT_HELD(&zh->zh_lock)); ASSERT(MUTEX_NOT_HELD(&zh->zh_lock));
ASSERT3S(zfs_refcount_count(&zh->zh_refcount), >, 0);
mutex_enter(&zh->zh_lock); mutex_enter(&zh->zh_lock);
return (zh); return (zh);
@@ -305,11 +303,11 @@ zfs_znode_hold_exit(zfsvfs_t *zfsvfs, znode_hold_t *zh)
boolean_t remove = B_FALSE; boolean_t remove = B_FALSE;
ASSERT(zfs_znode_held(zfsvfs, zh->zh_obj)); ASSERT(zfs_znode_held(zfsvfs, zh->zh_obj));
ASSERT3S(zfs_refcount_count(&zh->zh_refcount), >, 0);
mutex_exit(&zh->zh_lock); mutex_exit(&zh->zh_lock);
mutex_enter(&zfsvfs->z_hold_locks[i]); mutex_enter(&zfsvfs->z_hold_locks[i]);
if (zfs_refcount_remove(&zh->zh_refcount, NULL) == 0) { ASSERT3S(zh->zh_refcount, >, 0);
if (--zh->zh_refcount == 0) {
avl_remove(&zfsvfs->z_hold_trees[i], zh); avl_remove(&zfsvfs->z_hold_trees[i], zh);
remove = B_TRUE; remove = B_TRUE;
} }