Macroify teardown lock handling

This will allow platforms to implement the teardown lock as they see fit, in
particular in a different manner than rrm locks.

Reviewed-by: Ryan Moeller <ryan@iXsystems.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Matt Macy <mmacy@FreeBSD.org>
Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
Closes #11153
commit 5ebe425a5b (parent 9847f77f01)
Author: Mateusz Guzik
Date: 2020-11-04 17:23:48 -05:00
Committed by: Brian Behlendorf
7 changed files with 106 additions and 40 deletions
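
To make the indirection concrete, here is a minimal before/after view of one write-side call site (condensed from the zfs_umount() hunk below; all identifiers appear in the diff itself):

	/* Before: the caller names the rrm primitive directly. */
	rrm_enter(&zfsvfs->z_teardown_lock, RW_WRITER, FTAG);
	zfsvfs->z_unmounted = B_TRUE;
	rrm_exit(&zfsvfs->z_teardown_lock, FTAG);

	/*
	 * After: the same call site goes through the macro layer, so a platform
	 * may back z_teardown_lock with whichever lock primitive suits it.
	 */
	ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, FTAG);
	zfsvfs->z_unmounted = B_TRUE;
	ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);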


@@ -46,10 +46,12 @@
extern "C" {
#endif
#define zfs_teardown_lock_t rrmlock_t
#ifdef TEARDOWN_INACTIVE_RMS
typedef struct rmslock zfs_teardown_lock_t;
typedef struct rmslock zfs_teardown_inactive_lock_t;
#else
#define zfs_teardown_lock_t krwlock_t
#define zfs_teardown_inactive_lock_t krwlock_t
#endif
typedef struct zfsvfs zfsvfs_t;
@@ -80,8 +82,8 @@ struct zfsvfs {
int z_norm; /* normalization flags */
boolean_t z_atime; /* enable atimes mount option */
boolean_t z_unmounted; /* unmounted */
rrmlock_t z_teardown_lock;
zfs_teardown_lock_t z_teardown_inactive_lock;
zfs_teardown_lock_t z_teardown_lock;
zfs_teardown_inactive_lock_t z_teardown_inactive_lock;
list_t z_all_znodes; /* all vnodes in the fs */
uint64_t z_nr_znodes; /* number of znodes in the fs */
kmutex_t z_znodes_lock; /* lock for z_all_znodes */
@@ -112,6 +114,39 @@ struct zfsvfs {
struct task z_unlinked_drain_task;
};
#define ZFS_TEARDOWN_INIT(zfsvfs) \
rrm_init(&(zfsvfs)->z_teardown_lock, B_FALSE)
#define ZFS_TEARDOWN_DESTROY(zfsvfs) \
rrm_destroy(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_TRY_ENTER_READ(zfsvfs) \
rw_tryenter(&(zfsvfs)->z_teardown_lock, RW_READER)
#define ZFS_TEARDOWN_ENTER_READ(zfsvfs, tag) \
rrm_enter_read(&(zfsvfs)->z_teardown_lock, tag);
#define ZFS_TEARDOWN_EXIT_READ(zfsvfs, tag) \
rrm_exit(&(zfsvfs)->z_teardown_lock, tag)
#define ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, tag) \
rrm_enter(&(zfsvfs)->z_teardown_lock, RW_WRITER, tag)
#define ZFS_TEARDOWN_EXIT_WRITE(zfsvfs) \
rrm_exit(&(zfsvfs)->z_teardown_lock, tag)
#define ZFS_TEARDOWN_EXIT(zfsvfs, tag) \
rrm_exit(&(zfsvfs)->z_teardown_lock, tag)
#define ZFS_TEARDOWN_READ_HELD(zfsvfs) \
RRM_READ_HELD(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_WRITE_HELD(zfsvfs) \
RRM_WRITE_HELD(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_HELD(zfsvfs) \
RRM_LOCK_HELD(&(zfsvfs)->z_teardown_lock)
#ifdef TEARDOWN_INACTIVE_RMS
#define ZFS_TEARDOWN_INACTIVE_INIT(zfsvfs) \
rms_init(&(zfsvfs)->z_teardown_inactive_lock, "zfs teardown inactive")

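For illustration only: mirroring the TEARDOWN_INACTIVE_RMS variant visible just above, one way a FreeBSD build could later back the same macro contract with the kernel's rmslock primitive instead of rrm locks. This is a sketch of what the indirection permits, not part of this commit; it assumes the stock FreeBSD rms_init()/rms_rlock()/rms_wlock() family, and the combined-exit and held-assertion macros would need whatever the chosen primitive provides.

	/* Hypothetical rms-backed definitions (zfs_teardown_lock_t would be struct rmslock). */
	#define ZFS_TEARDOWN_INIT(zfsvfs) \
		rms_init(&(zfsvfs)->z_teardown_lock, "zfs teardown")
	#define ZFS_TEARDOWN_DESTROY(zfsvfs) \
		rms_destroy(&(zfsvfs)->z_teardown_lock)
	#define ZFS_TEARDOWN_ENTER_READ(zfsvfs, tag) \
		rms_rlock(&(zfsvfs)->z_teardown_lock)
	#define ZFS_TEARDOWN_EXIT_READ(zfsvfs, tag) \
		rms_runlock(&(zfsvfs)->z_teardown_lock)
	#define ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, tag) \
		rms_wlock(&(zfsvfs)->z_teardown_lock)
	#define ZFS_TEARDOWN_EXIT_WRITE(zfsvfs) \
		rms_wunlock(&(zfsvfs)->z_teardown_lock)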

@@ -124,15 +124,15 @@ extern minor_t zfsdev_minor_alloc(void);
/* Called on entry to each ZFS vnode and vfs operation */
#define ZFS_ENTER(zfsvfs) \
{ \
rrm_enter_read(&(zfsvfs)->z_teardown_lock, FTAG); \
ZFS_TEARDOWN_ENTER_READ((zfsvfs), FTAG); \
if (__predict_false((zfsvfs)->z_unmounted)) { \
ZFS_EXIT(zfsvfs); \
ZFS_TEARDOWN_EXIT_READ(zfsvfs, FTAG); \
return (EIO); \
} \
}
/* Must be called before exiting the vop */
#define ZFS_EXIT(zfsvfs) rrm_exit(&(zfsvfs)->z_teardown_lock, FTAG)
#define ZFS_EXIT(zfsvfs) ZFS_TEARDOWN_EXIT_READ(zfsvfs, FTAG)
/* Verifies the znode is valid */
#define ZFS_VERIFY_ZP(zp) \

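The payoff is that consumers keep their existing shape: a vnode operation still brackets its work with ZFS_ENTER/ZFS_EXIT and never names the underlying lock. A hypothetical caller (the function name and body are illustrative, not from the diff):

	static int
	zfs_example_vop(znode_t *zp)
	{
		zfsvfs_t *zfsvfs = zp->z_zfsvfs;

		ZFS_ENTER(zfsvfs);	/* read-acquires the teardown lock; returns EIO if unmounted */
		ZFS_VERIFY_ZP(zp);
		/* ... operate on the znode while teardown/unmount is excluded ... */
		ZFS_EXIT(zfsvfs);	/* ZFS_TEARDOWN_EXIT_READ under the hood */
		return (0);
	}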

@@ -138,6 +138,39 @@ struct zfsvfs {
taskqid_t z_drain_task; /* task id for the unlink drain task */
};
#define ZFS_TEARDOWN_INIT(zfsvfs) \
rrm_init(&(zfsvfs)->z_teardown_lock, B_FALSE)
#define ZFS_TEARDOWN_DESTROY(zfsvfs) \
rrm_destroy(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_TRY_ENTER_READ(zfsvfs) \
rw_tryenter(&(zfsvfs)->z_teardown_lock, RW_READER)
#define ZFS_TEARDOWN_ENTER_READ(zfsvfs, tag) \
rrm_enter_read(&(zfsvfs)->z_teardown_lock, tag);
#define ZFS_TEARDOWN_EXIT_READ(zfsvfs, tag) \
rrm_exit(&(zfsvfs)->z_teardown_lock, tag)
#define ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, tag) \
rrm_enter(&(zfsvfs)->z_teardown_lock, RW_WRITER, tag)
#define ZFS_TEARDOWN_EXIT_WRITE(zfsvfs) \
rrm_exit(&(zfsvfs)->z_teardown_lock, tag)
#define ZFS_TEARDOWN_EXIT(zfsvfs, tag) \
rrm_exit(&(zfsvfs)->z_teardown_lock, tag)
#define ZFS_TEARDOWN_READ_HELD(zfsvfs) \
RRM_READ_HELD(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_WRITE_HELD(zfsvfs) \
RRM_WRITE_HELD(&(zfsvfs)->z_teardown_lock)
#define ZFS_TEARDOWN_HELD(zfsvfs) \
RRM_LOCK_HELD(&(zfsvfs)->z_teardown_lock)
#define ZSB_XATTR 0x0001 /* Enable user xattrs */
/*


@@ -79,9 +79,9 @@ extern "C" {
/* Called on entry to each ZFS inode and vfs operation. */
#define ZFS_ENTER_ERROR(zfsvfs, error) \
do { \
rrm_enter_read(&(zfsvfs)->z_teardown_lock, FTAG); \
ZFS_TEARDOWN_ENTER_READ(zfsvfs, FTAG); \
if (unlikely((zfsvfs)->z_unmounted)) { \
ZFS_EXIT(zfsvfs); \
ZFS_TEARDOWN_EXIT_READ(zfsvfs, FTAG); \
return (error); \
} \
} while (0)
@@ -92,7 +92,7 @@ do { \
#define ZFS_EXIT(zfsvfs) \
do { \
zfs_exit_fs(zfsvfs); \
rrm_exit(&(zfsvfs)->z_teardown_lock, FTAG); \
ZFS_TEARDOWN_EXIT_READ(zfsvfs, FTAG); \
} while (0)
#define ZPL_EXIT(zfsvfs) \


@@ -986,11 +986,7 @@ zfsvfs_create_impl(zfsvfs_t **zfvp, zfsvfs_t *zfsvfs, objset_t *os)
offsetof(znode_t, z_link_node));
TASK_INIT(&zfsvfs->z_unlinked_drain_task, 0,
zfsvfs_task_unlinked_drain, zfsvfs);
#ifdef DIAGNOSTIC
rrm_init(&zfsvfs->z_teardown_lock, B_TRUE);
#else
rrm_init(&zfsvfs->z_teardown_lock, B_FALSE);
#endif
ZFS_TEARDOWN_INIT(zfsvfs);
ZFS_TEARDOWN_INACTIVE_INIT(zfsvfs);
rw_init(&zfsvfs->z_fuid_lock, NULL, RW_DEFAULT, NULL);
for (int i = 0; i != ZFS_OBJ_MTX_SZ; i++)
@@ -1130,7 +1126,7 @@ zfsvfs_free(zfsvfs_t *zfsvfs)
mutex_destroy(&zfsvfs->z_lock);
ASSERT(zfsvfs->z_nr_znodes == 0);
list_destroy(&zfsvfs->z_all_znodes);
rrm_destroy(&zfsvfs->z_teardown_lock);
ZFS_TEARDOWN_DESTROY(zfsvfs);
ZFS_TEARDOWN_INACTIVE_DESTROY(zfsvfs);
rw_destroy(&zfsvfs->z_fuid_lock);
for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
@@ -1382,10 +1378,10 @@ zfs_mount(vfs_t *vfsp)
* manipulations between entry to zfs_suspend_fs() and return
* from zfs_resume_fs().
*/
rrm_enter(&zfsvfs->z_teardown_lock, RW_WRITER, FTAG);
ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, FTAG);
zfs_unregister_callbacks(zfsvfs);
error = zfs_register_callbacks(vfsp);
rrm_exit(&zfsvfs->z_teardown_lock, FTAG);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
goto out;
}
@@ -1531,7 +1527,7 @@ zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
break;
}
}
rrm_enter(&zfsvfs->z_teardown_lock, RW_WRITER, FTAG);
ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, FTAG);
if (!unmounting) {
/*
@@ -1567,7 +1563,7 @@ zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
*/
if (!unmounting && (zfsvfs->z_unmounted || zfsvfs->z_os == NULL)) {
ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs);
rrm_exit(&zfsvfs->z_teardown_lock, FTAG);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
return (SET_ERROR(EIO));
}
@@ -1595,7 +1591,7 @@ zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
if (unmounting) {
zfsvfs->z_unmounted = B_TRUE;
ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs);
rrm_exit(&zfsvfs->z_teardown_lock, FTAG);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
}
/*
@@ -1655,9 +1651,9 @@ zfs_umount(vfs_t *vfsp, int fflag)
* vflush(FORCECLOSE). This way we ensure no future vnops
* will be called and risk operating on DOOMED vnodes.
*/
rrm_enter(&zfsvfs->z_teardown_lock, RW_WRITER, FTAG);
ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, FTAG);
zfsvfs->z_unmounted = B_TRUE;
rrm_exit(&zfsvfs->z_teardown_lock, FTAG);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
}
/*
@@ -1913,7 +1909,7 @@ zfs_resume_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
int err;
znode_t *zp;
ASSERT(RRM_WRITE_HELD(&zfsvfs->z_teardown_lock));
ASSERT(ZFS_TEARDOWN_WRITE_HELD(zfsvfs));
ASSERT(ZFS_TEARDOWN_INACTIVE_WRITE_HELD(zfsvfs));
/*
@@ -1953,7 +1949,7 @@ zfs_resume_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
bail:
/* release the VOPs */
ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs);
rrm_exit(&zfsvfs->z_teardown_lock, FTAG);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
if (err) {
/*
@@ -2068,7 +2064,7 @@ zfs_busy(void)
int
zfs_end_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
{
ASSERT(RRM_WRITE_HELD(&zfsvfs->z_teardown_lock));
ASSERT(ZFS_TEARDOWN_WRITE_HELD(zfsvfs));
ASSERT(ZFS_TEARDOWN_INACTIVE_WRITE_HELD(zfsvfs));
/*
@@ -2086,7 +2082,7 @@ zfs_end_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
/* release the VOPs */
ZFS_TEARDOWN_INACTIVE_EXIT_WRITE(zfsvfs);
rrm_exit(&zfsvfs->z_teardown_lock, FTAG);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
/*
* Try to force unmount this file system.

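Condensing the write-side protocol visible in the hunks above (illustrative sequencing, not a verbatim excerpt): teardown or suspend takes the lock for writing to fence off new ZFS_ENTER callers, the resume and end paths assert that it is still write-held, and it is only released once the objset is usable again or the unmount has committed.

	ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, FTAG);		/* zfsvfs_teardown(): block new ZFS_ENTER callers */
	/* ... drain znodes, evict or detach the objset ... */
	ASSERT(ZFS_TEARDOWN_WRITE_HELD(zfsvfs));	/* zfs_resume_fs() / zfs_end_fs() rely on this */
	/* ... rebind the objset ... */
	ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);		/* release the VOPs */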

@@ -815,7 +815,7 @@ zfsvfs_create_impl(zfsvfs_t **zfvp, zfsvfs_t *zfsvfs, objset_t *os)
mutex_init(&zfsvfs->z_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
offsetof(znode_t, z_link_node));
rrm_init(&zfsvfs->z_teardown_lock, B_FALSE);
ZFS_TEARDOWN_INIT(zfsvfs);
rw_init(&zfsvfs->z_teardown_inactive_lock, NULL, RW_DEFAULT, NULL);
rw_init(&zfsvfs->z_fuid_lock, NULL, RW_DEFAULT, NULL);
@@ -951,7 +951,7 @@ zfsvfs_free(zfsvfs_t *zfsvfs)
mutex_destroy(&zfsvfs->z_znodes_lock);
mutex_destroy(&zfsvfs->z_lock);
list_destroy(&zfsvfs->z_all_znodes);
rrm_destroy(&zfsvfs->z_teardown_lock);
ZFS_TEARDOWN_DESTROY(zfsvfs);
rw_destroy(&zfsvfs->z_teardown_inactive_lock);
rw_destroy(&zfsvfs->z_fuid_lock);
for (i = 0; i != size; i++) {
@@ -1336,7 +1336,7 @@ zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
}
}
rrm_enter(&zfsvfs->z_teardown_lock, RW_WRITER, FTAG);
ZFS_TEARDOWN_ENTER_WRITE(zfsvfs, FTAG);
if (!unmounting) {
/*
@@ -1367,7 +1367,7 @@ zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
*/
if (!unmounting && (zfsvfs->z_unmounted || zfsvfs->z_os == NULL)) {
rw_exit(&zfsvfs->z_teardown_inactive_lock);
rrm_exit(&zfsvfs->z_teardown_lock, FTAG);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
return (SET_ERROR(EIO));
}
@@ -1404,7 +1404,7 @@ zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
if (unmounting) {
zfsvfs->z_unmounted = B_TRUE;
rw_exit(&zfsvfs->z_teardown_inactive_lock);
rrm_exit(&zfsvfs->z_teardown_lock, FTAG);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
}
/*
@@ -1810,7 +1810,7 @@ zfs_resume_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
int err, err2;
znode_t *zp;
ASSERT(RRM_WRITE_HELD(&zfsvfs->z_teardown_lock));
ASSERT(ZFS_TEARDOWN_WRITE_HELD(zfsvfs));
ASSERT(RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock));
/*
@@ -1886,7 +1886,7 @@ bail:
/* release the VFS ops */
rw_exit(&zfsvfs->z_teardown_inactive_lock);
rrm_exit(&zfsvfs->z_teardown_lock, FTAG);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
if (err != 0) {
/*
@@ -1905,7 +1905,7 @@ bail:
int
zfs_end_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
{
ASSERT(RRM_WRITE_HELD(&zfsvfs->z_teardown_lock));
ASSERT(ZFS_TEARDOWN_WRITE_HELD(zfsvfs));
ASSERT(RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock));
/*
@@ -1923,7 +1923,7 @@ zfs_end_fs(zfsvfs_t *zfsvfs, dsl_dataset_t *ds)
/* release the VOPs */
rw_exit(&zfsvfs->z_teardown_inactive_lock);
rrm_exit(&zfsvfs->z_teardown_lock, FTAG);
ZFS_TEARDOWN_EXIT(zfsvfs, FTAG);
/*
* Try to force unmount this file system.


@@ -1414,15 +1414,17 @@ zfsvfs_hold(const char *name, void *tag, zfsvfs_t **zfvp, boolean_t writer)
if (getzfsvfs(name, zfvp) != 0)
error = zfsvfs_create(name, B_FALSE, zfvp);
if (error == 0) {
rrm_enter(&(*zfvp)->z_teardown_lock, (writer) ? RW_WRITER :
RW_READER, tag);
if (writer)
ZFS_TEARDOWN_ENTER_WRITE(*zfvp, tag);
else
ZFS_TEARDOWN_ENTER_READ(*zfvp, tag);
if ((*zfvp)->z_unmounted) {
/*
* XXX we could probably try again, since the unmounting
* thread should be just about to disassociate the
* objset from the zfsvfs.
*/
rrm_exit(&(*zfvp)->z_teardown_lock, tag);
ZFS_TEARDOWN_EXIT(*zfvp, tag);
return (SET_ERROR(EBUSY));
}
}
@@ -1432,7 +1434,7 @@ zfsvfs_hold(const char *name, void *tag, zfsvfs_t **zfvp, boolean_t writer)
static void
zfsvfs_rele(zfsvfs_t *zfsvfs, void *tag)
{
rrm_exit(&zfsvfs->z_teardown_lock, tag);
ZFS_TEARDOWN_EXIT(zfsvfs, tag);
if (zfs_vfs_held(zfsvfs)) {
zfs_vfs_rele(zfsvfs);