Prefix struct rangelock
A struct rangelock already exists on FreeBSD. Add a zfs_ prefix as
per our convention to prevent any conflict with existing symbols.
This change is a follow-up to commit 2cc479d0.
Reviewed-by: Matt Ahrens <matt@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matt Macy <mmacy@FreeBSD.org>
Closes #9534
parent 5c474614ff
commit ea15efd4c9
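The rename is mechanical: rangelock_t, locked_range_t, rangelock_type_t, and rangelock_cb_t become their zfs_-prefixed equivalents, and the zfs_rangelock_*() entry points keep their existing signatures apart from the type names. As a minimal sketch of how an in-tree consumer uses the prefixed API (illustrative only: my_object_t and its functions are made-up here, not part of this change, and this is kernel-module code, not a standalone program):

#include <sys/zfs_rlock.h>

/*
 * Hypothetical consumer embedding a range lock, the same way znode_t
 * embeds z_rangelock and zvol_state embeds zv_rangelock in this commit.
 */
typedef struct my_object {
	zfs_rangelock_t mo_rangelock;
} my_object_t;

static void
my_object_init(my_object_t *mo)
{
	/* No callback needed for a simple consumer. */
	zfs_rangelock_init(&mo->mo_rangelock, NULL, NULL);
}

static void
my_object_read_range(my_object_t *mo, uint64_t off, uint64_t len)
{
	zfs_locked_range_t *lr;

	/* Block concurrent writers for [off, off + len). */
	lr = zfs_rangelock_enter(&mo->mo_rangelock, off, len, RL_READER);
	/* ... read the data ... */
	zfs_rangelock_exit(lr);
}

static void
my_object_fini(my_object_t *mo)
{
	zfs_rangelock_fini(&mo->mo_rangelock);
}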
@@ -2225,7 +2225,7 @@ ztest_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb,
 	zgd->zgd_private = zd;
 
 	if (buf != NULL) { /* immediate write */
-		zgd->zgd_lr = (struct locked_range *)ztest_range_lock(zd,
+		zgd->zgd_lr = (struct zfs_locked_range *)ztest_range_lock(zd,
 		    object, offset, size, RL_READER);
 
 		error = dmu_read(os, object, offset, size, buf,
@@ -2240,7 +2240,7 @@ ztest_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb,
 			offset = 0;
 		}
 
-		zgd->zgd_lr = (struct locked_range *)ztest_range_lock(zd,
+		zgd->zgd_lr = (struct zfs_locked_range *)ztest_range_lock(zd,
 		    object, offset, size, RL_READER);
 
 		error = dmu_buf_hold(os, object, offset, zgd, &db,
@@ -1042,7 +1042,7 @@ typedef struct zgd {
 	struct lwb *zgd_lwb;
 	struct blkptr *zgd_bp;
 	dmu_buf_t *zgd_db;
-	struct locked_range *zgd_lr;
+	struct zfs_locked_range *zgd_lr;
 	void *zgd_private;
 } zgd_t;
 
@@ -39,40 +39,40 @@ typedef enum {
 	RL_READER,
 	RL_WRITER,
 	RL_APPEND
-} rangelock_type_t;
+} zfs_rangelock_type_t;
 
-struct locked_range;
+struct zfs_locked_range;
 
-typedef void (rangelock_cb_t)(struct locked_range *, void *);
+typedef void (zfs_rangelock_cb_t)(struct zfs_locked_range *, void *);
 
-typedef struct rangelock {
+typedef struct zfs_rangelock {
 	avl_tree_t rl_tree; /* contains locked_range_t */
 	kmutex_t rl_lock;
-	rangelock_cb_t *rl_cb;
+	zfs_rangelock_cb_t *rl_cb;
 	void *rl_arg;
-} rangelock_t;
+} zfs_rangelock_t;
 
-typedef struct locked_range {
-	rangelock_t *lr_rangelock; /* rangelock that this lock applies to */
+typedef struct zfs_locked_range {
+	zfs_rangelock_t *lr_rangelock; /* rangelock that this lock applies to */
 	avl_node_t lr_node; /* avl node link */
 	uint64_t lr_offset; /* file range offset */
 	uint64_t lr_length; /* file range length */
 	uint_t lr_count; /* range reference count in tree */
-	rangelock_type_t lr_type; /* range type */
+	zfs_rangelock_type_t lr_type; /* range type */
 	kcondvar_t lr_write_cv; /* cv for waiting writers */
 	kcondvar_t lr_read_cv; /* cv for waiting readers */
 	uint8_t lr_proxy; /* acting for original range */
 	uint8_t lr_write_wanted; /* writer wants to lock this range */
 	uint8_t lr_read_wanted; /* reader wants to lock this range */
-} locked_range_t;
+} zfs_locked_range_t;
 
-void zfs_rangelock_init(rangelock_t *, rangelock_cb_t *, void *);
-void zfs_rangelock_fini(rangelock_t *);
+void zfs_rangelock_init(zfs_rangelock_t *, zfs_rangelock_cb_t *, void *);
+void zfs_rangelock_fini(zfs_rangelock_t *);
 
-locked_range_t *zfs_rangelock_enter(rangelock_t *,
-    uint64_t, uint64_t, rangelock_type_t);
-void zfs_rangelock_exit(locked_range_t *);
-void zfs_rangelock_reduce(locked_range_t *, uint64_t, uint64_t);
+zfs_locked_range_t *zfs_rangelock_enter(zfs_rangelock_t *,
+    uint64_t, uint64_t, zfs_rangelock_type_t);
+void zfs_rangelock_exit(zfs_locked_range_t *);
+void zfs_rangelock_reduce(zfs_locked_range_t *, uint64_t, uint64_t);
 
 #ifdef __cplusplus
 }
@@ -191,7 +191,7 @@ typedef struct znode {
 	krwlock_t z_parent_lock; /* parent lock for directories */
 	krwlock_t z_name_lock; /* "master" lock for dirent locks */
 	zfs_dirlock_t *z_dirlocks; /* directory entry lock list */
-	rangelock_t z_rangelock; /* file range locks */
+	zfs_rangelock_t z_rangelock; /* file range locks */
 	boolean_t z_unlinked; /* file has been unlinked */
 	boolean_t z_atime_dirty; /* atime needs to be synced */
 	boolean_t z_zn_prefetch; /* Prefetch znodes? */
@@ -106,8 +106,8 @@
 static int
 zfs_rangelock_compare(const void *arg1, const void *arg2)
 {
-	const locked_range_t *rl1 = (const locked_range_t *)arg1;
-	const locked_range_t *rl2 = (const locked_range_t *)arg2;
+	const zfs_locked_range_t *rl1 = (const zfs_locked_range_t *)arg1;
+	const zfs_locked_range_t *rl2 = (const zfs_locked_range_t *)arg2;
 
 	return (AVL_CMP(rl1->lr_offset, rl2->lr_offset));
 }
@@ -118,17 +118,17 @@ zfs_rangelock_compare(const void *arg1, const void *arg2)
  * and may increase the range that's locked for RL_WRITER.
  */
 void
-zfs_rangelock_init(rangelock_t *rl, rangelock_cb_t *cb, void *arg)
+zfs_rangelock_init(zfs_rangelock_t *rl, zfs_rangelock_cb_t *cb, void *arg)
 {
 	mutex_init(&rl->rl_lock, NULL, MUTEX_DEFAULT, NULL);
 	avl_create(&rl->rl_tree, zfs_rangelock_compare,
-	    sizeof (locked_range_t), offsetof(locked_range_t, lr_node));
+	    sizeof (zfs_locked_range_t), offsetof(zfs_locked_range_t, lr_node));
 	rl->rl_cb = cb;
 	rl->rl_arg = arg;
 }
 
 void
-zfs_rangelock_fini(rangelock_t *rl)
+zfs_rangelock_fini(zfs_rangelock_t *rl)
 {
 	mutex_destroy(&rl->rl_lock);
 	avl_destroy(&rl->rl_tree);
@@ -138,14 +138,14 @@ zfs_rangelock_fini(rangelock_t *rl)
 * Check if a write lock can be grabbed, or wait and recheck until available.
 */
 static void
-zfs_rangelock_enter_writer(rangelock_t *rl, locked_range_t *new)
+zfs_rangelock_enter_writer(zfs_rangelock_t *rl, zfs_locked_range_t *new)
 {
 	avl_tree_t *tree = &rl->rl_tree;
-	locked_range_t *lr;
+	zfs_locked_range_t *lr;
 	avl_index_t where;
 	uint64_t orig_off = new->lr_offset;
 	uint64_t orig_len = new->lr_length;
-	rangelock_type_t orig_type = new->lr_type;
+	zfs_rangelock_type_t orig_type = new->lr_type;
 
 	for (;;) {
 		/*
@@ -178,12 +178,12 @@ zfs_rangelock_enter_writer(rangelock_t *rl, locked_range_t *new)
 		if (lr != NULL)
 			goto wait; /* already locked at same offset */
 
-		lr = (locked_range_t *)avl_nearest(tree, where, AVL_AFTER);
+		lr = avl_nearest(tree, where, AVL_AFTER);
 		if (lr != NULL &&
 		    lr->lr_offset < new->lr_offset + new->lr_length)
 			goto wait;
 
-		lr = (locked_range_t *)avl_nearest(tree, where, AVL_BEFORE);
+		lr = avl_nearest(tree, where, AVL_BEFORE);
 		if (lr != NULL &&
 		    lr->lr_offset + lr->lr_length > new->lr_offset)
 			goto wait;
@@ -208,10 +208,10 @@ wait:
 * If this is an original (non-proxy) lock then replace it by
 * a proxy and return the proxy.
 */
-static locked_range_t *
-zfs_rangelock_proxify(avl_tree_t *tree, locked_range_t *lr)
+static zfs_locked_range_t *
+zfs_rangelock_proxify(avl_tree_t *tree, zfs_locked_range_t *lr)
 {
-	locked_range_t *proxy;
+	zfs_locked_range_t *proxy;
 
 	if (lr->lr_proxy)
 		return (lr); /* already a proxy */
@@ -223,7 +223,7 @@ zfs_rangelock_proxify(avl_tree_t *tree, locked_range_t *lr)
 	lr->lr_count = 0;
 
 	/* create a proxy range lock */
-	proxy = kmem_alloc(sizeof (locked_range_t), KM_SLEEP);
+	proxy = kmem_alloc(sizeof (zfs_locked_range_t), KM_SLEEP);
 	proxy->lr_offset = lr->lr_offset;
 	proxy->lr_length = lr->lr_length;
 	proxy->lr_count = 1;
@@ -240,9 +240,11 @@ zfs_rangelock_proxify(avl_tree_t *tree, locked_range_t *lr)
 * Split the range lock at the supplied offset
 * returning the *front* proxy.
 */
-static locked_range_t *
-zfs_rangelock_split(avl_tree_t *tree, locked_range_t *lr, uint64_t off)
+static zfs_locked_range_t *
+zfs_rangelock_split(avl_tree_t *tree, zfs_locked_range_t *lr, uint64_t off)
 {
+	zfs_locked_range_t *rear;
+
 	ASSERT3U(lr->lr_length, >, 1);
 	ASSERT3U(off, >, lr->lr_offset);
 	ASSERT3U(off, <, lr->lr_offset + lr->lr_length);
@@ -250,7 +252,7 @@ zfs_rangelock_split(avl_tree_t *tree, locked_range_t *lr, uint64_t off)
 	ASSERT(lr->lr_read_wanted == B_FALSE);
 
 	/* create the rear proxy range lock */
-	locked_range_t *rear = kmem_alloc(sizeof (locked_range_t), KM_SLEEP);
+	rear = kmem_alloc(sizeof (zfs_locked_range_t), KM_SLEEP);
 	rear->lr_offset = off;
 	rear->lr_length = lr->lr_offset + lr->lr_length - off;
 	rear->lr_count = lr->lr_count;
@@ -259,7 +261,7 @@ zfs_rangelock_split(avl_tree_t *tree, locked_range_t *lr, uint64_t off)
 	rear->lr_write_wanted = B_FALSE;
 	rear->lr_read_wanted = B_FALSE;
 
-	locked_range_t *front = zfs_rangelock_proxify(tree, lr);
+	zfs_locked_range_t *front = zfs_rangelock_proxify(tree, lr);
 	front->lr_length = off - lr->lr_offset;
 
 	avl_insert_here(tree, rear, front, AVL_AFTER);
@@ -272,8 +274,10 @@ zfs_rangelock_split(avl_tree_t *tree, locked_range_t *lr, uint64_t off)
 static void
 zfs_rangelock_new_proxy(avl_tree_t *tree, uint64_t off, uint64_t len)
 {
+	zfs_locked_range_t *lr;
+
 	ASSERT(len != 0);
-	locked_range_t *lr = kmem_alloc(sizeof (locked_range_t), KM_SLEEP);
+	lr = kmem_alloc(sizeof (zfs_locked_range_t), KM_SLEEP);
 	lr->lr_offset = off;
 	lr->lr_length = len;
 	lr->lr_count = 1;
@@ -285,10 +289,10 @@ zfs_rangelock_new_proxy(avl_tree_t *tree, uint64_t off, uint64_t len)
 }
 
 static void
-zfs_rangelock_add_reader(avl_tree_t *tree, locked_range_t *new,
-    locked_range_t *prev, avl_index_t where)
+zfs_rangelock_add_reader(avl_tree_t *tree, zfs_locked_range_t *new,
+    zfs_locked_range_t *prev, avl_index_t where)
 {
-	locked_range_t *next;
+	zfs_locked_range_t *next;
 	uint64_t off = new->lr_offset;
 	uint64_t len = new->lr_length;
 
@@ -375,10 +379,10 @@ zfs_rangelock_add_reader(avl_tree_t *tree, locked_range_t *new,
 * Check if a reader lock can be grabbed, or wait and recheck until available.
 */
 static void
-zfs_rangelock_enter_reader(rangelock_t *rl, locked_range_t *new)
+zfs_rangelock_enter_reader(zfs_rangelock_t *rl, zfs_locked_range_t *new)
 {
 	avl_tree_t *tree = &rl->rl_tree;
-	locked_range_t *prev, *next;
+	zfs_locked_range_t *prev, *next;
 	avl_index_t where;
 	uint64_t off = new->lr_offset;
 	uint64_t len = new->lr_length;
@@ -389,7 +393,7 @@ zfs_rangelock_enter_reader(rangelock_t *rl, locked_range_t *new)
 retry:
 	prev = avl_find(tree, new, &where);
 	if (prev == NULL)
-		prev = (locked_range_t *)avl_nearest(tree, where, AVL_BEFORE);
+		prev = avl_nearest(tree, where, AVL_BEFORE);
 
 	/*
 	 * Check the previous range for a writer lock overlap.
@@ -415,7 +419,7 @@ retry:
 	if (prev != NULL)
 		next = AVL_NEXT(tree, prev);
 	else
-		next = (locked_range_t *)avl_nearest(tree, where, AVL_AFTER);
+		next = avl_nearest(tree, where, AVL_AFTER);
 	for (; next != NULL; next = AVL_NEXT(tree, next)) {
 		if (off + len <= next->lr_offset)
 			goto got_lock;
@@ -447,13 +451,15 @@ got_lock:
 * the range lock structure for later unlocking (or reduce range if the
 * entire file is locked as RL_WRITER).
 */
-locked_range_t *
-zfs_rangelock_enter(rangelock_t *rl, uint64_t off, uint64_t len,
-    rangelock_type_t type)
+zfs_locked_range_t *
+zfs_rangelock_enter(zfs_rangelock_t *rl, uint64_t off, uint64_t len,
+    zfs_rangelock_type_t type)
 {
+	zfs_locked_range_t *new;
+
 	ASSERT(type == RL_READER || type == RL_WRITER || type == RL_APPEND);
 
-	locked_range_t *new = kmem_alloc(sizeof (locked_range_t), KM_SLEEP);
+	new = kmem_alloc(sizeof (zfs_locked_range_t), KM_SLEEP);
 	new->lr_rangelock = rl;
 	new->lr_offset = off;
 	if (len + off < off) /* overflow */
@@ -483,10 +489,10 @@ zfs_rangelock_enter(rangelock_t *rl, uint64_t off, uint64_t len,
 }
 
 /*
- * Safely free the locked_range_t.
+ * Safely free the zfs_locked_range_t.
 */
 static void
-zfs_rangelock_free(locked_range_t *lr)
+zfs_rangelock_free(zfs_locked_range_t *lr)
 {
 	if (lr->lr_write_wanted)
 		cv_destroy(&lr->lr_write_cv);
@@ -494,14 +500,14 @@ zfs_rangelock_free(locked_range_t *lr)
 	if (lr->lr_read_wanted)
 		cv_destroy(&lr->lr_read_cv);
 
-	kmem_free(lr, sizeof (locked_range_t));
+	kmem_free(lr, sizeof (zfs_locked_range_t));
 }
 
 /*
 * Unlock a reader lock
 */
 static void
-zfs_rangelock_exit_reader(rangelock_t *rl, locked_range_t *remove,
+zfs_rangelock_exit_reader(zfs_rangelock_t *rl, zfs_locked_range_t *remove,
     list_t *free_list)
 {
 	avl_tree_t *tree = &rl->rl_tree;
@@ -530,11 +536,11 @@ zfs_rangelock_exit_reader(rangelock_t *rl, locked_range_t *remove,
		 * then decrement ref count on all proxies
		 * that make up this range, freeing them as needed.
		 */
-		locked_range_t *lr = avl_find(tree, remove, NULL);
+		zfs_locked_range_t *lr = avl_find(tree, remove, NULL);
 		ASSERT3P(lr, !=, NULL);
 		ASSERT3U(lr->lr_count, !=, 0);
 		ASSERT3U(lr->lr_type, ==, RL_READER);
-		locked_range_t *next = NULL;
+		zfs_locked_range_t *next = NULL;
 		for (len = remove->lr_length; len != 0; lr = next) {
 			len -= lr->lr_length;
 			if (len != 0) {
@@ -555,7 +561,7 @@ zfs_rangelock_exit_reader(rangelock_t *rl, locked_range_t *remove,
 				list_insert_tail(free_list, lr);
 			}
 		}
-		kmem_free(remove, sizeof (locked_range_t));
+		kmem_free(remove, sizeof (zfs_locked_range_t));
 	}
 }
 
@@ -563,11 +569,11 @@ zfs_rangelock_exit_reader(rangelock_t *rl, locked_range_t *remove,
 * Unlock range and destroy range lock structure.
 */
 void
-zfs_rangelock_exit(locked_range_t *lr)
+zfs_rangelock_exit(zfs_locked_range_t *lr)
 {
-	rangelock_t *rl = lr->lr_rangelock;
+	zfs_rangelock_t *rl = lr->lr_rangelock;
 	list_t free_list;
-	locked_range_t *free_lr;
+	zfs_locked_range_t *free_lr;
 
 	ASSERT(lr->lr_type == RL_WRITER || lr->lr_type == RL_READER);
 	ASSERT(lr->lr_count == 1 || lr->lr_count == 0);
@@ -577,8 +583,8 @@ zfs_rangelock_exit(locked_range_t *lr)
	 * The free list is used to defer the cv_destroy() and
	 * subsequent kmem_free until after the mutex is dropped.
	 */
-	list_create(&free_list, sizeof (locked_range_t),
-	    offsetof(locked_range_t, lr_node));
+	list_create(&free_list, sizeof (zfs_locked_range_t),
+	    offsetof(zfs_locked_range_t, lr_node));
 
 	mutex_enter(&rl->rl_lock);
 	if (lr->lr_type == RL_WRITER) {
@@ -592,7 +598,7 @@ zfs_rangelock_exit(locked_range_t *lr)
 	} else {
		/*
		 * lock may be shared, let rangelock_exit_reader()
-		 * release the lock and free the locked_range_t.
+		 * release the lock and free the zfs_locked_range_t.
		 */
 		zfs_rangelock_exit_reader(rl, lr, &free_list);
 	}
@@ -610,9 +616,9 @@ zfs_rangelock_exit(locked_range_t *lr)
 * entry in the tree.
 */
 void
-zfs_rangelock_reduce(locked_range_t *lr, uint64_t off, uint64_t len)
+zfs_rangelock_reduce(zfs_locked_range_t *lr, uint64_t off, uint64_t len)
 {
-	rangelock_t *rl = lr->lr_rangelock;
+	zfs_rangelock_t *rl = lr->lr_rangelock;
 
 	/* Ensure there are no other locks */
 	ASSERT3U(avl_numnodes(&rl->rl_tree), ==, 1);
@@ -485,7 +485,7 @@ zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
	/*
	 * Lock the range against changes.
	 */
-	locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
+	zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
 	    uio->uio_loffset, uio->uio_resid, RL_READER);
 
	/*
@@ -666,7 +666,7 @@ zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
	/*
	 * If in append mode, set the io offset pointer to eof.
	 */
-	locked_range_t *lr;
+	zfs_locked_range_t *lr;
 	if (ioflag & FAPPEND) {
		/*
		 * Obtain an appending range lock to guarantee file append
@@ -4526,7 +4526,7 @@ zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
 		redirty_page_for_writepage(wbc, pp);
 		unlock_page(pp);
 
-		locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
+		zfs_locked_range_t *lr = zfs_rangelock_enter(&zp->z_rangelock,
 		    pgoff, pglen, RL_WRITER);
 		lock_page(pp);
 
@@ -90,7 +90,7 @@ int zfs_unlink_suspend_progress = 0;
 * called with the rangelock_t's rl_lock held, which avoids races.
 */
 static void
-zfs_rangelock_cb(locked_range_t *new, void *arg)
+zfs_rangelock_cb(zfs_locked_range_t *new, void *arg)
 {
 	znode_t *zp = arg;
 
@@ -1477,7 +1477,7 @@ zfs_extend(znode_t *zp, uint64_t end)
 {
 	zfsvfs_t *zfsvfs = ZTOZSB(zp);
 	dmu_tx_t *tx;
-	locked_range_t *lr;
+	zfs_locked_range_t *lr;
 	uint64_t newblksz;
 	int error;
 
@@ -1595,7 +1595,7 @@ static int
 zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
 {
 	zfsvfs_t *zfsvfs = ZTOZSB(zp);
-	locked_range_t *lr;
+	zfs_locked_range_t *lr;
 	int error;
 
	/*
@@ -1675,7 +1675,7 @@ zfs_trunc(znode_t *zp, uint64_t end)
 {
 	zfsvfs_t *zfsvfs = ZTOZSB(zp);
 	dmu_tx_t *tx;
-	locked_range_t *lr;
+	zfs_locked_range_t *lr;
 	int error;
 	sa_bulk_attr_t bulk[2];
 	int count = 0;
@@ -122,7 +122,7 @@ struct zvol_state {
 	uint32_t zv_open_count; /* open counts */
 	uint32_t zv_changed; /* disk changed */
 	zilog_t *zv_zilog; /* ZIL handle */
-	rangelock_t zv_rangelock; /* for range locking */
+	zfs_rangelock_t zv_rangelock; /* for range locking */
 	dnode_t *zv_dn; /* dnode hold */
 	dev_t zv_dev; /* device id */
 	struct gendisk *zv_disk; /* generic disk */
@@ -720,7 +720,7 @@ zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, uint64_t offset,
 typedef struct zv_request {
 	zvol_state_t *zv;
 	struct bio *bio;
-	locked_range_t *lr;
+	zfs_locked_range_t *lr;
 } zv_request_t;
 
 static void