Prefix struct rangelock

A struct rangelock already exists on FreeBSD.  Add a zfs_ prefix, per
our convention, to prevent any conflict with existing symbols.  This
change is a follow-up to 2cc479d0.

Reviewed-by: Matt Ahrens <matt@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matt Macy <mmacy@FreeBSD.org>
Closes #9534
Matthew Macy authored on 2019-11-01 10:37:33 -07:00; committed by Brian Behlendorf
commit bd4dde8ef7 (parent bbc18de83a)
9 changed files with 80 additions and 74 deletions
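
The conflict the commit message refers to is a C tag-namespace collision:
FreeBSD's <sys/rangelock.h> already defines struct rangelock for the base
system's file range locking.  A minimal sketch of the clash, using a
hypothetical placeholder field rather than code from this commit:

/* Hypothetical illustration only; not code from this commit. */
#include <sys/rangelock.h>	/* FreeBSD base: defines struct rangelock */

/*
 * Before this change, ZFS headers defined their own struct rangelock,
 * so any translation unit that saw both definitions failed to compile:
 */
struct rangelock {	/* error: redefinition of 'struct rangelock' */
	int placeholder;	/* stands in for the old ZFS field layout */
};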


@@ -2197,7 +2197,7 @@ ztest_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb,
zgd->zgd_private = zd;
if (buf != NULL) { /* immediate write */
-zgd->zgd_lr = (struct locked_range *)ztest_range_lock(zd,
+zgd->zgd_lr = (struct zfs_locked_range *)ztest_range_lock(zd,
object, offset, size, RL_READER);
error = dmu_read(os, object, offset, size, buf,
@@ -2212,7 +2212,7 @@ ztest_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb,
offset = 0;
}
-zgd->zgd_lr = (struct locked_range *)ztest_range_lock(zd,
+zgd->zgd_lr = (struct zfs_locked_range *)ztest_range_lock(zd,
object, offset, size, RL_READER);
error = dmu_buf_hold(os, object, offset, zgd, &db,


@@ -1045,7 +1045,7 @@ typedef struct zgd {
struct lwb *zgd_lwb;
struct blkptr *zgd_bp;
dmu_buf_t *zgd_db;
-struct locked_range *zgd_lr;
+struct zfs_locked_range *zgd_lr;
void *zgd_private;
} zgd_t;


@@ -39,40 +39,40 @@ typedef enum {
RL_READER,
RL_WRITER,
RL_APPEND
-} rangelock_type_t;
+} zfs_rangelock_type_t;
-struct locked_range;
+struct zfs_locked_range;
-typedef void (rangelock_cb_t)(struct locked_range *, void *);
+typedef void (zfs_rangelock_cb_t)(struct zfs_locked_range *, void *);
-typedef struct rangelock {
+typedef struct zfs_rangelock {
avl_tree_t rl_tree; /* contains locked_range_t */
kmutex_t rl_lock;
-rangelock_cb_t *rl_cb;
+zfs_rangelock_cb_t *rl_cb;
void *rl_arg;
-} rangelock_t;
+} zfs_rangelock_t;
-typedef struct locked_range {
-rangelock_t *lr_rangelock; /* rangelock that this lock applies to */
+typedef struct zfs_locked_range {
+zfs_rangelock_t *lr_rangelock; /* rangelock that this lock applies to */
avl_node_t lr_node; /* avl node link */
uint64_t lr_offset; /* file range offset */
uint64_t lr_length; /* file range length */
uint_t lr_count; /* range reference count in tree */
-rangelock_type_t lr_type; /* range type */
+zfs_rangelock_type_t lr_type; /* range type */
kcondvar_t lr_write_cv; /* cv for waiting writers */
kcondvar_t lr_read_cv; /* cv for waiting readers */
uint8_t lr_proxy; /* acting for original range */
uint8_t lr_write_wanted; /* writer wants to lock this range */
uint8_t lr_read_wanted; /* reader wants to lock this range */
-} locked_range_t;
+} zfs_locked_range_t;
-void zfs_rangelock_init(rangelock_t *, rangelock_cb_t *, void *);
-void zfs_rangelock_fini(rangelock_t *);
+void zfs_rangelock_init(zfs_rangelock_t *, zfs_rangelock_cb_t *, void *);
+void zfs_rangelock_fini(zfs_rangelock_t *);
-locked_range_t *zfs_rangelock_enter(rangelock_t *,
-uint64_t, uint64_t, rangelock_type_t);
-void zfs_rangelock_exit(locked_range_t *);
-void zfs_rangelock_reduce(locked_range_t *, uint64_t, uint64_t);
+zfs_locked_range_t *zfs_rangelock_enter(zfs_rangelock_t *,
+uint64_t, uint64_t, zfs_rangelock_type_t);
+void zfs_rangelock_exit(zfs_locked_range_t *);
+void zfs_rangelock_reduce(zfs_locked_range_t *, uint64_t, uint64_t);
#ifdef __cplusplus
}
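
To make the renamed interface concrete, here is a minimal usage sketch of
the zfs_rangelock API declared above.  The types and the znode z_rangelock
field are taken from this commit; the surrounding function is illustrative
only:

/* Illustrative consumer; not part of this commit. */
static int
example_read_locked(znode_t *zp, uint64_t off, uint64_t len)
{
	zfs_locked_range_t *lr;

	/* Block until no conflicting writer covers [off, off + len). */
	lr = zfs_rangelock_enter(&zp->z_rangelock, off, len, RL_READER);

	/* ... read the file range under the lock ... */

	zfs_rangelock_exit(lr);
	return (0);
}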


@@ -191,7 +191,7 @@ typedef struct znode {
krwlock_t z_parent_lock; /* parent lock for directories */
krwlock_t z_name_lock; /* "master" lock for dirent locks */
zfs_dirlock_t *z_dirlocks; /* directory entry lock list */
-rangelock_t z_rangelock; /* file range locks */
+zfs_rangelock_t z_rangelock; /* file range locks */
boolean_t z_unlinked; /* file has been unlinked */
boolean_t z_atime_dirty; /* atime needs to be synced */
boolean_t z_zn_prefetch; /* Prefetch znodes? */


@@ -47,7 +47,7 @@ typedef struct zvol_state {
uint32_t zv_open_count; /* open counts */
uint32_t zv_changed; /* disk changed */
zilog_t *zv_zilog; /* ZIL handle */
-rangelock_t zv_rangelock; /* for range locking */
+zfs_rangelock_t zv_rangelock; /* for range locking */
dnode_t *zv_dn; /* dnode hold */
list_node_t zv_next; /* next zvol_state_t linkage */
uint64_t zv_hash; /* name hash */


@@ -485,7 +485,7 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
/*
* Lock the range against changes.
*/
-locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
+zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
uio->uio_loffset, uio->uio_resid, RL_READER);
/*
@@ -666,7 +666,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
/*
* If in append mode, set the io offset pointer to eof.
*/
-locked_range_t *lr;
+zfs_locked_range_t *lr;
if (ioflag & FAPPEND) {
/*
* Obtain an appending range lock to guarantee file append
@@ -4518,7 +4518,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
redirty_page_for_writepage(wbc, pp);
unlock_page(pp);
-locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
+zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
pgoff, pglen, RL_WRITER);
lock_page(pp);


@@ -90,7 +90,7 @@ int zfs_unlink_suspend_progress = 0;
* called with the rangelock_t's rl_lock held, which avoids races.
*/
static void
-zfs_rangelock_cb(locked_range_t *new, void *arg)
+zfs_rangelock_cb(zfs_locked_range_t *new, void *arg)
{
znode_t *zp = arg;
@@ -1468,7 +1468,7 @@ zfs_extend(znode_t *zp, uint64_t end)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
dmu_tx_t *tx;
-locked_range_t *lr;
+zfs_locked_range_t *lr;
uint64_t newblksz;
int error;
@@ -1586,7 +1586,7 @@ static int
zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
-locked_range_t *lr;
+zfs_locked_range_t *lr;
int error;
/*
@@ -1666,7 +1666,7 @@ zfs_trunc(znode_t *zp, uint64_t end)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
dmu_tx_t *tx;
-locked_range_t *lr;
+zfs_locked_range_t *lr;
int error;
sa_bulk_attr_t bulk[2];
int count = 0;


@@ -57,7 +57,7 @@ static struct ida zvol_ida;
typedef struct zv_request {
zvol_state_t *zv;
struct bio *bio;
-locked_range_t *lr;
+zfs_locked_range_t *lr;
} zv_request_t;
/*


@@ -106,8 +106,8 @@
static int
zfs_rangelock_compare(const void *arg1, const void *arg2)
{
-const locked_range_t *rl1 = (const locked_range_t *)arg1;
-const locked_range_t *rl2 = (const locked_range_t *)arg2;
+const zfs_locked_range_t *rl1 = (const zfs_locked_range_t *)arg1;
+const zfs_locked_range_t *rl2 = (const zfs_locked_range_t *)arg2;
return (TREE_CMP(rl1->lr_offset, rl2->lr_offset));
}
@@ -118,17 +118,17 @@ zfs_rangelock_compare(const void *arg1, const void *arg2)
* and may increase the range that's locked for RL_WRITER.
*/
void
-zfs_rangelock_init(rangelock_t *rl, rangelock_cb_t *cb, void *arg)
+zfs_rangelock_init(zfs_rangelock_t *rl, zfs_rangelock_cb_t *cb, void *arg)
{
mutex_init(&rl->rl_lock, NULL, MUTEX_DEFAULT, NULL);
avl_create(&rl->rl_tree, zfs_rangelock_compare,
-sizeof (locked_range_t), offsetof(locked_range_t, lr_node));
+sizeof (zfs_locked_range_t), offsetof(zfs_locked_range_t, lr_node));
rl->rl_cb = cb;
rl->rl_arg = arg;
}
void
-zfs_rangelock_fini(rangelock_t *rl)
+zfs_rangelock_fini(zfs_rangelock_t *rl)
{
mutex_destroy(&rl->rl_lock);
avl_destroy(&rl->rl_tree);
@@ -138,14 +138,14 @@ zfs_rangelock_fini(rangelock_t *rl)
* Check if a write lock can be grabbed, or wait and recheck until available.
*/
static void
-zfs_rangelock_enter_writer(rangelock_t *rl, locked_range_t *new)
+zfs_rangelock_enter_writer(zfs_rangelock_t *rl, zfs_locked_range_t *new)
{
avl_tree_t *tree = &rl->rl_tree;
-locked_range_t *lr;
+zfs_locked_range_t *lr;
avl_index_t where;
uint64_t orig_off = new->lr_offset;
uint64_t orig_len = new->lr_length;
-rangelock_type_t orig_type = new->lr_type;
+zfs_rangelock_type_t orig_type = new->lr_type;
for (;;) {
/*
@@ -178,12 +178,12 @@ zfs_rangelock_enter_writer(rangelock_t *rl, locked_range_t *new)
if (lr != NULL)
goto wait; /* already locked at same offset */
-lr = (locked_range_t *)avl_nearest(tree, where, AVL_AFTER);
+lr = avl_nearest(tree, where, AVL_AFTER);
if (lr != NULL &&
lr->lr_offset < new->lr_offset + new->lr_length)
goto wait;
-lr = (locked_range_t *)avl_nearest(tree, where, AVL_BEFORE);
+lr = avl_nearest(tree, where, AVL_BEFORE);
if (lr != NULL &&
lr->lr_offset + lr->lr_length > new->lr_offset)
goto wait;
@@ -208,10 +208,10 @@ wait:
* If this is an original (non-proxy) lock then replace it by
* a proxy and return the proxy.
*/
-static locked_range_t *
-zfs_rangelock_proxify(avl_tree_t *tree, locked_range_t *lr)
+static zfs_locked_range_t *
+zfs_rangelock_proxify(avl_tree_t *tree, zfs_locked_range_t *lr)
{
-locked_range_t *proxy;
+zfs_locked_range_t *proxy;
if (lr->lr_proxy)
return (lr); /* already a proxy */
@@ -223,7 +223,7 @@ zfs_rangelock_proxify(avl_tree_t *tree, locked_range_t *lr)
lr->lr_count = 0;
/* create a proxy range lock */
-proxy = kmem_alloc(sizeof (locked_range_t), KM_SLEEP);
+proxy = kmem_alloc(sizeof (zfs_locked_range_t), KM_SLEEP);
proxy->lr_offset = lr->lr_offset;
proxy->lr_length = lr->lr_length;
proxy->lr_count = 1;
@@ -240,9 +240,11 @@ zfs_rangelock_proxify(avl_tree_t *tree, locked_range_t *lr)
* Split the range lock at the supplied offset
* returning the *front* proxy.
*/
-static locked_range_t *
-zfs_rangelock_split(avl_tree_t *tree, locked_range_t *lr, uint64_t off)
+static zfs_locked_range_t *
+zfs_rangelock_split(avl_tree_t *tree, zfs_locked_range_t *lr, uint64_t off)
{
+zfs_locked_range_t *rear;
+
ASSERT3U(lr->lr_length, >, 1);
ASSERT3U(off, >, lr->lr_offset);
ASSERT3U(off, <, lr->lr_offset + lr->lr_length);
@@ -250,7 +252,7 @@ zfs_rangelock_split(avl_tree_t *tree, locked_range_t *lr, uint64_t off)
ASSERT(lr->lr_read_wanted == B_FALSE);
/* create the rear proxy range lock */
-locked_range_t *rear = kmem_alloc(sizeof (locked_range_t), KM_SLEEP);
+rear = kmem_alloc(sizeof (zfs_locked_range_t), KM_SLEEP);
rear->lr_offset = off;
rear->lr_length = lr->lr_offset + lr->lr_length - off;
rear->lr_count = lr->lr_count;
@@ -259,7 +261,7 @@ zfs_rangelock_split(avl_tree_t *tree, locked_range_t *lr, uint64_t off)
rear->lr_write_wanted = B_FALSE;
rear->lr_read_wanted = B_FALSE;
-locked_range_t *front = zfs_rangelock_proxify(tree, lr);
+zfs_locked_range_t *front = zfs_rangelock_proxify(tree, lr);
front->lr_length = off - lr->lr_offset;
avl_insert_here(tree, rear, front, AVL_AFTER);
@@ -272,8 +274,10 @@ zfs_rangelock_split(avl_tree_t *tree, locked_range_t *lr, uint64_t off)
static void
zfs_rangelock_new_proxy(avl_tree_t *tree, uint64_t off, uint64_t len)
{
+zfs_locked_range_t *lr;
+
ASSERT(len != 0);
-locked_range_t *lr = kmem_alloc(sizeof (locked_range_t), KM_SLEEP);
+lr = kmem_alloc(sizeof (zfs_locked_range_t), KM_SLEEP);
lr->lr_offset = off;
lr->lr_length = len;
lr->lr_count = 1;
@@ -285,10 +289,10 @@ zfs_rangelock_new_proxy(avl_tree_t *tree, uint64_t off, uint64_t len)
}
static void
-zfs_rangelock_add_reader(avl_tree_t *tree, locked_range_t *new,
-locked_range_t *prev, avl_index_t where)
+zfs_rangelock_add_reader(avl_tree_t *tree, zfs_locked_range_t *new,
+zfs_locked_range_t *prev, avl_index_t where)
{
-locked_range_t *next;
+zfs_locked_range_t *next;
uint64_t off = new->lr_offset;
uint64_t len = new->lr_length;
@@ -375,10 +379,10 @@ zfs_rangelock_add_reader(avl_tree_t *tree, locked_range_t *new,
* Check if a reader lock can be grabbed, or wait and recheck until available.
*/
static void
-zfs_rangelock_enter_reader(rangelock_t *rl, locked_range_t *new)
+zfs_rangelock_enter_reader(zfs_rangelock_t *rl, zfs_locked_range_t *new)
{
avl_tree_t *tree = &rl->rl_tree;
-locked_range_t *prev, *next;
+zfs_locked_range_t *prev, *next;
avl_index_t where;
uint64_t off = new->lr_offset;
uint64_t len = new->lr_length;
@@ -389,7 +393,7 @@ zfs_rangelock_enter_reader(rangelock_t *rl, locked_range_t *new)
retry:
prev = avl_find(tree, new, &where);
if (prev == NULL)
-prev = (locked_range_t *)avl_nearest(tree, where, AVL_BEFORE);
+prev = avl_nearest(tree, where, AVL_BEFORE);
/*
* Check the previous range for a writer lock overlap.
@@ -415,7 +419,7 @@ retry:
if (prev != NULL)
next = AVL_NEXT(tree, prev);
else
-next = (locked_range_t *)avl_nearest(tree, where, AVL_AFTER);
+next = avl_nearest(tree, where, AVL_AFTER);
for (; next != NULL; next = AVL_NEXT(tree, next)) {
if (off + len <= next->lr_offset)
goto got_lock;
@@ -447,13 +451,15 @@ got_lock:
* the range lock structure for later unlocking (or reduce range if the
* entire file is locked as RL_WRITER).
*/
-locked_range_t *
-zfs_rangelock_enter(rangelock_t *rl, uint64_t off, uint64_t len,
-rangelock_type_t type)
+zfs_locked_range_t *
+zfs_rangelock_enter(zfs_rangelock_t *rl, uint64_t off, uint64_t len,
+zfs_rangelock_type_t type)
{
+zfs_locked_range_t *new;
+
ASSERT(type == RL_READER || type == RL_WRITER || type == RL_APPEND);
-locked_range_t *new = kmem_alloc(sizeof (locked_range_t), KM_SLEEP);
+new = kmem_alloc(sizeof (zfs_locked_range_t), KM_SLEEP);
new->lr_rangelock = rl;
new->lr_offset = off;
if (len + off < off) /* overflow */
@@ -483,10 +489,10 @@ zfs_rangelock_enter(rangelock_t *rl, uint64_t off, uint64_t len,
}
/*
-* Safely free the locked_range_t.
+* Safely free the zfs_locked_range_t.
*/
static void
-zfs_rangelock_free(locked_range_t *lr)
+zfs_rangelock_free(zfs_locked_range_t *lr)
{
if (lr->lr_write_wanted)
cv_destroy(&lr->lr_write_cv);
@@ -494,14 +500,14 @@ zfs_rangelock_free(locked_range_t *lr)
if (lr->lr_read_wanted)
cv_destroy(&lr->lr_read_cv);
-kmem_free(lr, sizeof (locked_range_t));
+kmem_free(lr, sizeof (zfs_locked_range_t));
}
/*
* Unlock a reader lock
*/
static void
-zfs_rangelock_exit_reader(rangelock_t *rl, locked_range_t *remove,
+zfs_rangelock_exit_reader(zfs_rangelock_t *rl, zfs_locked_range_t *remove,
list_t *free_list)
{
avl_tree_t *tree = &rl->rl_tree;
@@ -530,11 +536,11 @@ zfs_rangelock_exit_reader(rangelock_t *rl, locked_range_t *remove,
* then decrement ref count on all proxies
* that make up this range, freeing them as needed.
*/
-locked_range_t *lr = avl_find(tree, remove, NULL);
+zfs_locked_range_t *lr = avl_find(tree, remove, NULL);
ASSERT3P(lr, !=, NULL);
ASSERT3U(lr->lr_count, !=, 0);
ASSERT3U(lr->lr_type, ==, RL_READER);
-locked_range_t *next = NULL;
+zfs_locked_range_t *next = NULL;
for (len = remove->lr_length; len != 0; lr = next) {
len -= lr->lr_length;
if (len != 0) {
@@ -555,7 +561,7 @@ zfs_rangelock_exit_reader(rangelock_t *rl, locked_range_t *remove,
list_insert_tail(free_list, lr);
}
}
-kmem_free(remove, sizeof (locked_range_t));
+kmem_free(remove, sizeof (zfs_locked_range_t));
}
}
@@ -563,11 +569,11 @@ zfs_rangelock_exit_reader(rangelock_t *rl, locked_range_t *remove,
* Unlock range and destroy range lock structure.
*/
void
-zfs_rangelock_exit(locked_range_t *lr)
+zfs_rangelock_exit(zfs_locked_range_t *lr)
{
-rangelock_t *rl = lr->lr_rangelock;
+zfs_rangelock_t *rl = lr->lr_rangelock;
list_t free_list;
-locked_range_t *free_lr;
+zfs_locked_range_t *free_lr;
ASSERT(lr->lr_type == RL_WRITER || lr->lr_type == RL_READER);
ASSERT(lr->lr_count == 1 || lr->lr_count == 0);
@@ -577,8 +583,8 @@ zfs_rangelock_exit(locked_range_t *lr)
* The free list is used to defer the cv_destroy() and
* subsequent kmem_free until after the mutex is dropped.
*/
-list_create(&free_list, sizeof (locked_range_t),
-offsetof(locked_range_t, lr_node));
+list_create(&free_list, sizeof (zfs_locked_range_t),
+offsetof(zfs_locked_range_t, lr_node));
mutex_enter(&rl->rl_lock);
if (lr->lr_type == RL_WRITER) {
@@ -592,7 +598,7 @@ zfs_rangelock_exit(locked_range_t *lr)
} else {
/*
* lock may be shared, let rangelock_exit_reader()
-* release the lock and free the locked_range_t.
+* release the lock and free the zfs_locked_range_t.
*/
zfs_rangelock_exit_reader(rl, lr, &free_list);
}
@@ -610,9 +616,9 @@ zfs_rangelock_exit(locked_range_t *lr)
* entry in the tree.
*/
void
-zfs_rangelock_reduce(locked_range_t *lr, uint64_t off, uint64_t len)
+zfs_rangelock_reduce(zfs_locked_range_t *lr, uint64_t off, uint64_t len)
{
-rangelock_t *rl = lr->lr_rangelock;
+zfs_rangelock_t *rl = lr->lr_rangelock;
/* Ensure there are no other locks */
ASSERT3U(avl_numnodes(&rl->rl_tree), ==, 1);
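
For context on zfs_rangelock_reduce(): it supports the pattern noted in the
zfs_rangelock_enter() comment above, where an append locks the entire file
before the final write offset is known and then shrinks the lock.  A hedged
sketch of that pattern (illustrative only; the real logic lives in
zfs_write()):

/* Illustrative sketch; not part of this diff. */
static void
example_append_locking(znode_t *zp, uint64_t n)
{
	zfs_locked_range_t *lr;

	/*
	 * With RL_APPEND the callback may fall back to locking the whole
	 * file (lr_length == UINT64_MAX) when the blocksize must grow.
	 */
	lr = zfs_rangelock_enter(&zp->z_rangelock, 0, n, RL_APPEND);

	if (lr->lr_length == UINT64_MAX) {
		/* EOF is now stable; shrink the lock to the real range. */
		zfs_rangelock_reduce(lr, lr->lr_offset, n);
	}

	/* ... perform the write, then ... */
	zfs_rangelock_exit(lr);
}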