Rename rangelock_ functions to zfs_rangelock_

A rangelock KPI already exists on FreeBSD.  Add a zfs_ prefix as
per our convention to prevent any conflict with existing symbols.

Reviewed-by: Igor Kozhukhov <igor@dilos.org>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matt Macy <mmacy@FreeBSD.org>
Closes #9402
Author:    Matthew Macy
Date:      2019-10-03 15:54:29 -07:00
Committer: Brian Behlendorf
Commit:    2cc479d049
Parent:    64b6c47d90
6 changed files with 84 additions and 82 deletions


@@ -66,13 +66,13 @@ typedef struct locked_range {
 	uint8_t lr_read_wanted;	/* reader wants to lock this range */
 } locked_range_t;
 
-void rangelock_init(rangelock_t *, rangelock_cb_t *, void *);
-void rangelock_fini(rangelock_t *);
+void zfs_rangelock_init(rangelock_t *, rangelock_cb_t *, void *);
+void zfs_rangelock_fini(rangelock_t *);
 
-locked_range_t *rangelock_enter(rangelock_t *,
+locked_range_t *zfs_rangelock_enter(rangelock_t *,
     uint64_t, uint64_t, rangelock_type_t);
-void rangelock_exit(locked_range_t *);
-void rangelock_reduce(locked_range_t *, uint64_t, uint64_t);
+void zfs_rangelock_exit(locked_range_t *);
+void zfs_rangelock_reduce(locked_range_t *, uint64_t, uint64_t);
 
 #ifdef __cplusplus
 }
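
Note: usage is unchanged apart from the prefix. A minimal consumer of the renamed
KPI might look like this (illustrative sketch only; the demo_locked_read()
helper is hypothetical and not part of this commit):

	/* Hypothetical consumer of the renamed range-lock KPI. */
	static int
	demo_locked_read(rangelock_t *rl, uint64_t off, uint64_t len)
	{
		/* Blocks until no conflicting lock overlaps [off, off + len). */
		locked_range_t *lr = zfs_rangelock_enter(rl, off, len, RL_READER);

		/* ... read the locked range safely here ... */

		zfs_rangelock_exit(lr);	/* drop the lock and wake any waiters */
		return (0);
	}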


@@ -485,7 +485,7 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 	/*
 	 * Lock the range against changes.
 	 */
-	locked_range_t *lr = rangelock_enter(&zp->z_rangelock,
+	locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
 	    uio->uio_loffset, uio->uio_resid, RL_READER);
 
 	/*
@@ -558,7 +558,7 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 	dataset_kstats_update_read_kstats(&zfsvfs->z_kstat, nread);
 	task_io_account_read(nread);
 out:
-	rangelock_exit(lr);
+	zfs_rangelock_exit(lr);
 
 	ZFS_EXIT(zfsvfs);
 	return (error);
@@ -672,7 +672,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 		 * Obtain an appending range lock to guarantee file append
 		 * semantics.  We reset the write offset once we have the lock.
 		 */
-		lr = rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
+		lr = zfs_rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
 		woff = lr->lr_offset;
 		if (lr->lr_length == UINT64_MAX) {
 			/*
@@ -689,11 +689,11 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 		 * this write, then this range lock will lock the entire file
 		 * so that we can re-write the block safely.
 		 */
-		lr = rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
+		lr = zfs_rangelock_enter(&zp->z_rangelock, woff, n, RL_WRITER);
 	}
 
 	if (woff >= limit) {
-		rangelock_exit(lr);
+		zfs_rangelock_exit(lr);
 		ZFS_EXIT(zfsvfs);
 		return (SET_ERROR(EFBIG));
 	}
@@ -811,7 +811,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 				new_blksz = MIN(end_size, max_blksz);
 			}
 			zfs_grow_blocksize(zp, new_blksz, tx);
-			rangelock_reduce(lr, woff, n);
+			zfs_rangelock_reduce(lr, woff, n);
 		}
 
 		/*
@@ -950,7 +950,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
 	}
 
 	zfs_inode_update(zp);
-	rangelock_exit(lr);
+	zfs_rangelock_exit(lr);
 
 	/*
 	 * If we're in replay mode, or we made no progress, return error.
@@ -1003,7 +1003,7 @@ zfs_get_done(zgd_t *zgd, int error)
 	if (zgd->zgd_db)
 		dmu_buf_rele(zgd->zgd_db, zgd);
 
-	rangelock_exit(zgd->zgd_lr);
+	zfs_rangelock_exit(zgd->zgd_lr);
 
 	/*
	 * Release the vnode asynchronously as we currently have the
@@ -1064,7 +1064,7 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
 	 * we don't have to write the data twice.
 	 */
 	if (buf != NULL) { /* immediate write */
-		zgd->zgd_lr = rangelock_enter(&zp->z_rangelock,
+		zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
 		    offset, size, RL_READER);
 		/* test for truncation needs to be done while range locked */
 		if (offset >= zp->z_size) {
@@ -1086,12 +1086,12 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
 			size = zp->z_blksz;
 			blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
 			offset -= blkoff;
-			zgd->zgd_lr = rangelock_enter(&zp->z_rangelock,
+			zgd->zgd_lr = zfs_rangelock_enter(&zp->z_rangelock,
 			    offset, size, RL_READER);
 			if (zp->z_blksz == size)
 				break;
 			offset += blkoff;
-			rangelock_exit(zgd->zgd_lr);
+			zfs_rangelock_exit(zgd->zgd_lr);
 		}
 		/* test for truncation needs to be done while range locked */
 		if (lr->lr_offset >= zp->z_size)
@@ -4517,14 +4517,14 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
 	redirty_page_for_writepage(wbc, pp);
 	unlock_page(pp);
 
-	locked_range_t *lr = rangelock_enter(&zp->z_rangelock,
+	locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
 	    pgoff, pglen, RL_WRITER);
 	lock_page(pp);
 
 	/* Page mapping changed or it was no longer dirty, we're done */
 	if (unlikely((mapping != pp->mapping) || !PageDirty(pp))) {
 		unlock_page(pp);
-		rangelock_exit(lr);
+		zfs_rangelock_exit(lr);
 		ZFS_EXIT(zfsvfs);
 		return (0);
 	}
@@ -4532,7 +4532,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
 	/* Another process started write block if required */
 	if (PageWriteback(pp)) {
 		unlock_page(pp);
-		rangelock_exit(lr);
+		zfs_rangelock_exit(lr);
 
 		if (wbc->sync_mode != WB_SYNC_NONE) {
 			if (PageWriteback(pp))
@@ -4546,7 +4546,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
 	/* Clear the dirty flag the required locks are held */
 	if (!clear_page_dirty_for_io(pp)) {
 		unlock_page(pp);
-		rangelock_exit(lr);
+		zfs_rangelock_exit(lr);
 		ZFS_EXIT(zfsvfs);
 		return (0);
 	}
@@ -4573,7 +4573,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
 		__set_page_dirty_nobuffers(pp);
 		ClearPageError(pp);
 		end_page_writeback(pp);
-		rangelock_exit(lr);
+		zfs_rangelock_exit(lr);
 		ZFS_EXIT(zfsvfs);
 		return (err);
 	}
@@ -4600,7 +4600,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
 	    zfs_putpage_commit_cb, pp);
 	dmu_tx_commit(tx);
 
-	rangelock_exit(lr);
+	zfs_rangelock_exit(lr);
 
 	if (wbc->sync_mode != WB_SYNC_NONE) {
 		/*


@@ -129,7 +129,7 @@ zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
 	mutex_init(&zp->z_acl_lock, NULL, MUTEX_DEFAULT, NULL);
 	rw_init(&zp->z_xattr_lock, NULL, RW_DEFAULT, NULL);
-	rangelock_init(&zp->z_rangelock, zfs_rangelock_cb, zp);
+	zfs_rangelock_init(&zp->z_rangelock, zfs_rangelock_cb, zp);
 
 	zp->z_dirlocks = NULL;
 	zp->z_acl_cached = NULL;
@@ -151,7 +151,7 @@ zfs_znode_cache_destructor(void *buf, void *arg)
 	rw_destroy(&zp->z_name_lock);
 	mutex_destroy(&zp->z_acl_lock);
 	rw_destroy(&zp->z_xattr_lock);
-	rangelock_fini(&zp->z_rangelock);
+	zfs_rangelock_fini(&zp->z_rangelock);
 
 	ASSERT(zp->z_dirlocks == NULL);
 	ASSERT(zp->z_acl_cached == NULL);
@@ -1475,13 +1475,13 @@ zfs_extend(znode_t *zp, uint64_t end)
 	/*
 	 * We will change zp_size, lock the whole file.
 	 */
-	lr = rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);
+	lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);
 
 	/*
 	 * Nothing to do if file already at desired length.
 	 */
 	if (end <= zp->z_size) {
-		rangelock_exit(lr);
+		zfs_rangelock_exit(lr);
 		return (0);
 	}
 	tx = dmu_tx_create(zfsvfs->z_os);
@@ -1511,7 +1511,7 @@ zfs_extend(znode_t *zp, uint64_t end)
 	error = dmu_tx_assign(tx, TXG_WAIT);
 	if (error) {
 		dmu_tx_abort(tx);
-		rangelock_exit(lr);
+		zfs_rangelock_exit(lr);
 		return (error);
 	}
@@ -1523,7 +1523,7 @@ zfs_extend(znode_t *zp, uint64_t end)
 	VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(ZTOZSB(zp)),
 	    &zp->z_size, sizeof (zp->z_size), tx));
 
-	rangelock_exit(lr);
+	zfs_rangelock_exit(lr);
 
 	dmu_tx_commit(tx);
@@ -1592,13 +1592,13 @@ zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
 	/*
 	 * Lock the range being freed.
 	 */
-	lr = rangelock_enter(&zp->z_rangelock, off, len, RL_WRITER);
+	lr = zfs_rangelock_enter(&zp->z_rangelock, off, len, RL_WRITER);
 
 	/*
 	 * Nothing to do if file already at desired length.
 	 */
 	if (off >= zp->z_size) {
-		rangelock_exit(lr);
+		zfs_rangelock_exit(lr);
 		return (0);
 	}
@@ -1648,7 +1648,7 @@ zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
 			    page_len);
 		}
 	}
-	rangelock_exit(lr);
+	zfs_rangelock_exit(lr);
 
 	return (error);
 }
@@ -1674,20 +1674,20 @@ zfs_trunc(znode_t *zp, uint64_t end)
 	/*
 	 * We will change zp_size, lock the whole file.
 	 */
-	lr = rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);
+	lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);
 
 	/*
 	 * Nothing to do if file already at desired length.
 	 */
 	if (end >= zp->z_size) {
-		rangelock_exit(lr);
+		zfs_rangelock_exit(lr);
 		return (0);
 	}
 
 	error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, end,
 	    DMU_OBJECT_END);
 	if (error) {
-		rangelock_exit(lr);
+		zfs_rangelock_exit(lr);
 		return (error);
 	}
 	tx = dmu_tx_create(zfsvfs->z_os);
@@ -1697,7 +1697,7 @@ zfs_trunc(znode_t *zp, uint64_t end)
 	error = dmu_tx_assign(tx, TXG_WAIT);
 	if (error) {
 		dmu_tx_abort(tx);
-		rangelock_exit(lr);
+		zfs_rangelock_exit(lr);
 		return (error);
 	}
@@ -1713,7 +1713,7 @@ zfs_trunc(znode_t *zp, uint64_t end)
 	VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0);
 	dmu_tx_commit(tx);
 
-	rangelock_exit(lr);
+	zfs_rangelock_exit(lr);
 
 	return (0);
 }


@@ -142,7 +142,7 @@ zvol_write(void *arg)
 		if (error)
 			break;
 	}
-	rangelock_exit(zvr->lr);
+	zfs_rangelock_exit(zvr->lr);
 
 	int64_t nwritten = start_resid - uio.uio_resid;
 	dataset_kstats_update_write_kstats(&zv->zv_zso->zvo_kstat, nwritten);
@@ -213,7 +213,7 @@ zvol_discard(void *arg)
 		    ZVOL_OBJ, start, size);
 	}
 unlock:
-	rangelock_exit(zvr->lr);
+	zfs_rangelock_exit(zvr->lr);
 
 	if (error == 0 && sync)
 		zil_commit(zv->zv_zilog, ZVOL_OBJ);
@@ -259,7 +259,7 @@ zvol_read(void *arg)
 			break;
 		}
 	}
-	rangelock_exit(zvr->lr);
+	zfs_rangelock_exit(zvr->lr);
 
 	int64_t nread = start_resid - uio.uio_resid;
 	dataset_kstats_update_read_kstats(&zv->zv_zso->zvo_kstat, nread);
@@ -344,7 +344,7 @@ zvol_request(struct request_queue *q, struct bio *bio)
 		 * are asynchronous, we take it here synchronously to make
 		 * sure overlapped I/Os are properly ordered.
 		 */
-		zvr->lr = rangelock_enter(&zv->zv_rangelock, offset, size,
+		zvr->lr = zfs_rangelock_enter(&zv->zv_rangelock, offset, size,
 		    RL_WRITER);
 		/*
 		 * Sync writes and discards execute zil_commit() which may need
@@ -383,7 +383,7 @@ zvol_request(struct request_queue *q, struct bio *bio)
 		rw_enter(&zv->zv_suspend_lock, RW_READER);
 
-		zvr->lr = rangelock_enter(&zv->zv_rangelock, offset, size,
+		zvr->lr = zfs_rangelock_enter(&zv->zv_rangelock, offset, size,
 		    RL_READER);
 		if (zvol_request_sync || taskq_dispatch(zvol_taskq,
 		    zvol_read, zvr, TQ_SLEEP) == TASKQID_INVALID)
@@ -799,7 +799,7 @@ zvol_alloc(dev_t dev, const char *name)
 	zv->zv_open_count = 0;
 	strlcpy(zv->zv_name, name, MAXNAMELEN);
 
-	rangelock_init(&zv->zv_rangelock, NULL, NULL);
+	zfs_rangelock_init(&zv->zv_rangelock, NULL, NULL);
 	rw_init(&zv->zv_suspend_lock, NULL, RW_DEFAULT, NULL);
 
 	zv->zv_zso->zvo_disk->major = zvol_major;
@@ -861,7 +861,7 @@ zvol_free(zvol_state_t *zv)
 	ASSERT(zv->zv_zso->zvo_disk->private_data == NULL);
 
 	rw_destroy(&zv->zv_suspend_lock);
-	rangelock_fini(&zv->zv_rangelock);
+	zfs_rangelock_fini(&zv->zv_rangelock);
 
 	del_gendisk(zv->zv_zso->zvo_disk);
 	blk_cleanup_queue(zv->zv_zso->zvo_queue);
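
Note: the zvol hunks above all follow one hand-off protocol: zvol_request()
takes the range lock synchronously before dispatching to the taskq, and the
asynchronous worker (zvol_read()/zvol_write()/zvol_discard()) drops it when
the I/O completes, which keeps overlapping BIOs ordered. A sketch of the
worker-side contract (the demo worker is hypothetical; the zvr fields follow
the code above):

	/* Hypothetical taskq worker illustrating the lock hand-off. */
	static void
	demo_zvol_worker(void *arg)
	{
		zv_request_t *zvr = arg;

		/* ... perform the I/O described by zvr ... */

		/* Lock was entered in zvol_request(); the worker releases it. */
		zfs_rangelock_exit(zvr->lr);
	}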


@@ -104,7 +104,7 @@
  * Locks are ordered on the start offset of the range.
  */
 static int
-rangelock_compare(const void *arg1, const void *arg2)
+zfs_rangelock_compare(const void *arg1, const void *arg2)
 {
 	const locked_range_t *rl1 = (const locked_range_t *)arg1;
 	const locked_range_t *rl2 = (const locked_range_t *)arg2;
@@ -118,17 +118,17 @@ rangelock_compare(const void *arg1, const void *arg2)
  * and may increase the range that's locked for RL_WRITER.
  */
 void
-rangelock_init(rangelock_t *rl, rangelock_cb_t *cb, void *arg)
+zfs_rangelock_init(rangelock_t *rl, rangelock_cb_t *cb, void *arg)
 {
 	mutex_init(&rl->rl_lock, NULL, MUTEX_DEFAULT, NULL);
-	avl_create(&rl->rl_tree, rangelock_compare,
+	avl_create(&rl->rl_tree, zfs_rangelock_compare,
 	    sizeof (locked_range_t), offsetof(locked_range_t, lr_node));
 	rl->rl_cb = cb;
 	rl->rl_arg = arg;
 }
 
 void
-rangelock_fini(rangelock_t *rl)
+zfs_rangelock_fini(rangelock_t *rl)
 {
 	mutex_destroy(&rl->rl_lock);
 	avl_destroy(&rl->rl_tree);
@@ -138,7 +138,7 @@ rangelock_fini(rangelock_t *rl)
  * Check if a write lock can be grabbed, or wait and recheck until available.
  */
 static void
-rangelock_enter_writer(rangelock_t *rl, locked_range_t *new)
+zfs_rangelock_enter_writer(rangelock_t *rl, locked_range_t *new)
 {
 	avl_tree_t *tree = &rl->rl_tree;
 	locked_range_t *lr;
@@ -209,7 +209,7 @@ wait:
  * a proxy and return the proxy.
 */
 static locked_range_t *
-rangelock_proxify(avl_tree_t *tree, locked_range_t *lr)
+zfs_rangelock_proxify(avl_tree_t *tree, locked_range_t *lr)
 {
 	locked_range_t *proxy;
@@ -241,7 +241,7 @@ rangelock_proxify(avl_tree_t *tree, locked_range_t *lr)
  * returning the *front* proxy.
 */
 static locked_range_t *
-rangelock_split(avl_tree_t *tree, locked_range_t *lr, uint64_t off)
+zfs_rangelock_split(avl_tree_t *tree, locked_range_t *lr, uint64_t off)
 {
 	ASSERT3U(lr->lr_length, >, 1);
 	ASSERT3U(off, >, lr->lr_offset);
@@ -259,7 +259,7 @@ rangelock_split(avl_tree_t *tree, locked_range_t *lr, uint64_t off)
 	rear->lr_write_wanted = B_FALSE;
 	rear->lr_read_wanted = B_FALSE;
 
-	locked_range_t *front = rangelock_proxify(tree, lr);
+	locked_range_t *front = zfs_rangelock_proxify(tree, lr);
 	front->lr_length = off - lr->lr_offset;
 
 	avl_insert_here(tree, rear, front, AVL_AFTER);
@@ -270,7 +270,7 @@ rangelock_split(avl_tree_t *tree, locked_range_t *lr, uint64_t off)
  * Create and add a new proxy range lock for the supplied range.
 */
 static void
-rangelock_new_proxy(avl_tree_t *tree, uint64_t off, uint64_t len)
+zfs_rangelock_new_proxy(avl_tree_t *tree, uint64_t off, uint64_t len)
 {
 	ASSERT(len != 0);
 	locked_range_t *lr = kmem_alloc(sizeof (locked_range_t), KM_SLEEP);
@@ -285,7 +285,7 @@ rangelock_new_proxy(avl_tree_t *tree, uint64_t off, uint64_t len)
 }
 
 static void
-rangelock_add_reader(avl_tree_t *tree, locked_range_t *new,
+zfs_rangelock_add_reader(avl_tree_t *tree, locked_range_t *new,
     locked_range_t *prev, avl_index_t where)
 {
 	locked_range_t *next;
@@ -307,7 +307,7 @@ rangelock_add_reader(avl_tree_t *tree, locked_range_t *new,
 			 * convert to proxy if needed then
 			 * split this entry and bump ref count
 			 */
-			prev = rangelock_split(tree, prev, off);
+			prev = zfs_rangelock_split(tree, prev, off);
 			prev = AVL_NEXT(tree, prev); /* move to rear range */
 		}
 	}
@@ -326,7 +326,7 @@ rangelock_add_reader(avl_tree_t *tree, locked_range_t *new,
 	if (off < next->lr_offset) {
 		/* Add a proxy for initial range before the overlap */
-		rangelock_new_proxy(tree, off, next->lr_offset - off);
+		zfs_rangelock_new_proxy(tree, off, next->lr_offset - off);
 	}
 
 	new->lr_count = 0; /* will use proxies in tree */
@@ -344,30 +344,30 @@ rangelock_add_reader(avl_tree_t *tree, locked_range_t *new,
 			/* there's a gap */
 			ASSERT3U(next->lr_offset, >,
 			    prev->lr_offset + prev->lr_length);
-			rangelock_new_proxy(tree,
+			zfs_rangelock_new_proxy(tree,
 			    prev->lr_offset + prev->lr_length,
 			    next->lr_offset -
 			    (prev->lr_offset + prev->lr_length));
 		}
 		if (off + len == next->lr_offset + next->lr_length) {
 			/* exact overlap with end */
-			next = rangelock_proxify(tree, next);
+			next = zfs_rangelock_proxify(tree, next);
 			next->lr_count++;
 			return;
 		}
 		if (off + len < next->lr_offset + next->lr_length) {
 			/* new range ends in the middle of this block */
-			next = rangelock_split(tree, next, off + len);
+			next = zfs_rangelock_split(tree, next, off + len);
 			next->lr_count++;
 			return;
 		}
 		ASSERT3U(off + len, >, next->lr_offset + next->lr_length);
-		next = rangelock_proxify(tree, next);
+		next = zfs_rangelock_proxify(tree, next);
 		next->lr_count++;
 	}
 
 	/* Add the remaining end range. */
-	rangelock_new_proxy(tree, prev->lr_offset + prev->lr_length,
+	zfs_rangelock_new_proxy(tree, prev->lr_offset + prev->lr_length,
 	    (off + len) - (prev->lr_offset + prev->lr_length));
 }
@@ -375,7 +375,7 @@ rangelock_add_reader(avl_tree_t *tree, locked_range_t *new,
 * Check if a reader lock can be grabbed, or wait and recheck until available.
 */
 static void
-rangelock_enter_reader(rangelock_t *rl, locked_range_t *new)
+zfs_rangelock_enter_reader(rangelock_t *rl, locked_range_t *new)
 {
 	avl_tree_t *tree = &rl->rl_tree;
 	locked_range_t *prev, *next;
@@ -437,7 +437,7 @@ got_lock:
 	 * Add the read lock, which may involve splitting existing
 	 * locks and bumping ref counts (r_count).
 	 */
-	rangelock_add_reader(tree, new, prev, where);
+	zfs_rangelock_add_reader(tree, new, prev, where);
 }
 
 /*
@@ -448,7 +448,7 @@ got_lock:
 * entire file is locked as RL_WRITER).
 */
 locked_range_t *
-rangelock_enter(rangelock_t *rl, uint64_t off, uint64_t len,
+zfs_rangelock_enter(rangelock_t *rl, uint64_t off, uint64_t len,
     rangelock_type_t type)
 {
 	ASSERT(type == RL_READER || type == RL_WRITER || type == RL_APPEND);
@@ -473,9 +473,11 @@ rangelock_enter(rangelock_t *rl, uint64_t off, uint64_t len,
 		if (avl_numnodes(&rl->rl_tree) == 0)
 			avl_add(&rl->rl_tree, new);
 		else
-			rangelock_enter_reader(rl, new);
-	} else
-		rangelock_enter_writer(rl, new); /* RL_WRITER or RL_APPEND */
+			zfs_rangelock_enter_reader(rl, new);
+	} else {
+		/* RL_WRITER or RL_APPEND */
+		zfs_rangelock_enter_writer(rl, new);
+	}
 	mutex_exit(&rl->rl_lock);
 
 	return (new);
 }
@@ -484,7 +486,7 @@ rangelock_enter(rangelock_t *rl, uint64_t off, uint64_t len,
 * Safely free the locked_range_t.
 */
 static void
-rangelock_free(locked_range_t *lr)
+zfs_rangelock_free(locked_range_t *lr)
 {
 	if (lr->lr_write_wanted)
 		cv_destroy(&lr->lr_write_cv);
@@ -499,7 +501,7 @@ rangelock_free(locked_range_t *lr)
 * Unlock a reader lock
 */
 static void
-rangelock_exit_reader(rangelock_t *rl, locked_range_t *remove,
+zfs_rangelock_exit_reader(rangelock_t *rl, locked_range_t *remove,
     list_t *free_list)
 {
 	avl_tree_t *tree = &rl->rl_tree;
@@ -561,7 +563,7 @@ rangelock_exit_reader(rangelock_t *rl, locked_range_t *remove,
 * Unlock range and destroy range lock structure.
 */
 void
-rangelock_exit(locked_range_t *lr)
+zfs_rangelock_exit(locked_range_t *lr)
 {
 	rangelock_t *rl = lr->lr_rangelock;
 	list_t free_list;
@@ -592,12 +594,12 @@ rangelock_exit(locked_range_t *lr)
 		 * lock may be shared, let rangelock_exit_reader()
 		 * release the lock and free the locked_range_t.
 		 */
-		rangelock_exit_reader(rl, lr, &free_list);
+		zfs_rangelock_exit_reader(rl, lr, &free_list);
 	}
 	mutex_exit(&rl->rl_lock);
 
 	while ((free_lr = list_remove_head(&free_list)) != NULL)
-		rangelock_free(free_lr);
+		zfs_rangelock_free(free_lr);
 
 	list_destroy(&free_list);
 }
@@ -608,7 +610,7 @@ rangelock_exit(locked_range_t *lr)
 * entry in the tree.
 */
 void
-rangelock_reduce(locked_range_t *lr, uint64_t off, uint64_t len)
+zfs_rangelock_reduce(locked_range_t *lr, uint64_t off, uint64_t len)
 {
 	rangelock_t *rl = lr->lr_rangelock;
@@ -631,9 +633,9 @@ rangelock_reduce(locked_range_t *lr, uint64_t off, uint64_t len)
 }
 
 #if defined(_KERNEL)
-EXPORT_SYMBOL(rangelock_init);
-EXPORT_SYMBOL(rangelock_fini);
-EXPORT_SYMBOL(rangelock_enter);
-EXPORT_SYMBOL(rangelock_exit);
-EXPORT_SYMBOL(rangelock_reduce);
+EXPORT_SYMBOL(zfs_rangelock_init);
+EXPORT_SYMBOL(zfs_rangelock_fini);
+EXPORT_SYMBOL(zfs_rangelock_enter);
+EXPORT_SYMBOL(zfs_rangelock_exit);
+EXPORT_SYMBOL(zfs_rangelock_reduce);
 #endif
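
Note: zfs_rangelock_enter() and zfs_rangelock_reduce() combine into the append
pattern used by zfs_write() above: RL_APPEND may lock the whole file, and the
caller narrows the lock once the real offset is known. A condensed sketch
(zp and n stand in for the real znode and write length):

	/* Condensed append pattern; mirrors the zfs_write() hunks above. */
	locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);
	uint64_t woff = lr->lr_offset;	/* append offset, fixed up under the lock */
	if (lr->lr_length == UINT64_MAX) {
		/* The whole file was locked; shrink to the range being written. */
		zfs_rangelock_reduce(lr, woff, n);
	}
	/* ... write n bytes at woff ... */
	zfs_rangelock_exit(lr);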


@@ -652,7 +652,7 @@ zvol_get_done(zgd_t *zgd, int error)
 	if (zgd->zgd_db)
 		dmu_buf_rele(zgd->zgd_db, zgd);
 
-	rangelock_exit(zgd->zgd_lr);
+	zfs_rangelock_exit(zgd->zgd_lr);
 
 	kmem_free(zgd, sizeof (zgd_t));
 }
@@ -685,8 +685,8 @@ zvol_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
 	 * we don't have to write the data twice.
 	 */
 	if (buf != NULL) { /* immediate write */
-		zgd->zgd_lr = rangelock_enter(&zv->zv_rangelock, offset, size,
-		    RL_READER);
+		zgd->zgd_lr = zfs_rangelock_enter(&zv->zv_rangelock, offset,
+		    size, RL_READER);
 		error = dmu_read_by_dnode(zv->zv_dn, offset, size, buf,
 		    DMU_READ_NO_PREFETCH);
 	} else { /* indirect write */
@@ -698,8 +698,8 @@ zvol_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
 		 */
 		size = zv->zv_volblocksize;
 		offset = P2ALIGN_TYPED(offset, size, uint64_t);
-		zgd->zgd_lr = rangelock_enter(&zv->zv_rangelock, offset, size,
-		    RL_READER);
+		zgd->zgd_lr = zfs_rangelock_enter(&zv->zv_rangelock, offset,
+		    size, RL_READER);
 		error = dmu_buf_hold_by_dnode(zv->zv_dn, offset, zgd, &db,
 		    DMU_READ_NO_PREFETCH);
 		if (error == 0) {