Move zfsdev_state_{init,destroy} to common code

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Ryan Moeller <freqlabs@FreeBSD.org>
Closes #11833
Ryan Moeller 2021-03-16 13:04:58 +00:00 committed by Brian Behlendorf
parent 1dff545278
commit a631283b74
6 changed files with 96 additions and 109 deletions

View File

@@ -77,8 +77,6 @@ typedef struct zfs_soft_state {
void *zss_data;
} zfs_soft_state_t;
extern minor_t zfsdev_minor_alloc(void);
/*
* Range locking rules
* --------------------

View File

@@ -567,7 +567,6 @@ typedef struct zfsdev_state {
extern void *zfsdev_get_state(minor_t minor, enum zfsdev_state_type which);
extern int zfsdev_getminor(int fd, minor_t *minorp);
extern minor_t zfsdev_minor_alloc(void);
extern uint_t zfs_fsyncer_key;
extern uint_t zfs_allow_log_key;

View File

@@ -91,6 +91,10 @@ void zfs_vfs_rele(zfsvfs_t *);
long zfsdev_ioctl_common(uint_t, zfs_cmd_t *, int);
int zfsdev_attach(void);
void zfsdev_detach(void);
void zfsdev_private_set_state(void *, zfsdev_state_t *);
zfsdev_state_t *zfsdev_private_get_state(void *);
int zfsdev_state_init(void *);
void zfsdev_state_destroy(void *);
int zfs_kmod_init(void);
void zfs_kmod_fini(void);
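
For orientation, a hedged sketch of how a platform port is expected to wire these hooks together, judging from the declarations above and the FreeBSD/Linux changes below: the port stores the zfsdev_state_t in whatever per-open handle its character device layer provides, and calls the common init/destroy from its open/close paths. Everything named myos_* here is illustrative and not part of this change; only the zfsdev_* symbols and zfsdev_state_lock come from the sources.

/*
 * Illustrative-only glue for a hypothetical "myos" platform port.
 */
struct myos_open_file {
	zfsdev_state_t *mof_state;	/* per-open private handle */
};

void
zfsdev_private_set_state(void *priv, zfsdev_state_t *zs)
{
	((struct myos_open_file *)priv)->mof_state = zs;
}

zfsdev_state_t *
zfsdev_private_get_state(void *priv)
{
	return (((struct myos_open_file *)priv)->mof_state);
}

static int
myos_zfsdev_open(struct myos_open_file *mof)
{
	int error;

	/* zfsdev_state_init() asserts that zfsdev_state_lock is held. */
	mutex_enter(&zfsdev_state_lock);
	error = zfsdev_state_init(mof);
	mutex_exit(&zfsdev_state_lock);
	return (error);
}

static void
myos_zfsdev_close(struct myos_open_file *mof)
{
	/* Last close; no global lock is needed around destruction. */
	zfsdev_state_destroy(mof);
}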

View File

@@ -113,7 +113,6 @@ static int zfs__fini(void);
static void zfs_shutdown(void *, int);
static eventhandler_tag zfs_shutdown_event_tag;
extern zfsdev_state_t *zfsdev_state_list;
#define ZFS_MIN_KSTACK_PAGES 4
@@ -182,66 +181,29 @@ out:
static void
zfsdev_close(void *data)
{
zfsdev_state_t *zs = data;
ASSERT(zs != NULL);
ASSERT3S(zs->zs_minor, >, 0);
zfs_onexit_destroy(zs->zs_onexit);
zfs_zevent_destroy(zs->zs_zevent);
zs->zs_onexit = NULL;
zs->zs_zevent = NULL;
membar_producer();
zs->zs_minor = -1;
zfsdev_state_destroy(data);
}
static int
zfs_ctldev_init(struct cdev *devp)
void
zfsdev_private_set_state(void *priv __unused, zfsdev_state_t *zs)
{
boolean_t newzs = B_FALSE;
minor_t minor;
zfsdev_state_t *zs, *zsprev = NULL;
ASSERT(MUTEX_HELD(&zfsdev_state_lock));
minor = zfsdev_minor_alloc();
if (minor == 0)
return (SET_ERROR(ENXIO));
for (zs = zfsdev_state_list; zs != NULL; zs = zs->zs_next) {
if (zs->zs_minor == -1)
break;
zsprev = zs;
}
if (!zs) {
zs = kmem_zalloc(sizeof (zfsdev_state_t), KM_SLEEP);
newzs = B_TRUE;
}
devfs_set_cdevpriv(zs, zfsdev_close);
}
zfs_onexit_init((zfs_onexit_t **)&zs->zs_onexit);
zfs_zevent_init((zfs_zevent_t **)&zs->zs_zevent);
if (newzs) {
zs->zs_minor = minor;
wmb();
zsprev->zs_next = zs;
} else {
wmb();
zs->zs_minor = minor;
}
return (0);
zfsdev_state_t *
zfsdev_private_get_state(void *priv)
{
return (priv);
}
static int
zfsdev_open(struct cdev *devp, int flag, int mode, struct thread *td)
zfsdev_open(struct cdev *devp __unused, int flag __unused, int mode __unused,
struct thread *td __unused)
{
int error;
mutex_enter(&zfsdev_state_lock);
error = zfs_ctldev_init(devp);
error = zfsdev_state_init(NULL);
mutex_exit(&zfsdev_state_lock);
return (error);

View File

@@ -87,69 +87,20 @@ zfs_vfs_rele(zfsvfs_t *zfsvfs)
deactivate_super(zfsvfs->z_sb);
}
static int
zfsdev_state_init(struct file *filp)
void
zfsdev_private_set_state(void *priv, zfsdev_state_t *zs)
{
zfsdev_state_t *zs, *zsprev = NULL;
minor_t minor;
boolean_t newzs = B_FALSE;
ASSERT(MUTEX_HELD(&zfsdev_state_lock));
minor = zfsdev_minor_alloc();
if (minor == 0)
return (SET_ERROR(ENXIO));
for (zs = zfsdev_state_list; zs != NULL; zs = zs->zs_next) {
if (zs->zs_minor == -1)
break;
zsprev = zs;
}
if (!zs) {
zs = kmem_zalloc(sizeof (zfsdev_state_t), KM_SLEEP);
newzs = B_TRUE;
}
struct file *filp = priv;
filp->private_data = zs;
zfs_onexit_init((zfs_onexit_t **)&zs->zs_onexit);
zfs_zevent_init((zfs_zevent_t **)&zs->zs_zevent);
/*
* In order to provide for lock-free concurrent read access
* to the minor list in zfsdev_get_state(), new entries
* must be completely written before linking them into the
* list whereas existing entries are already linked; the last
* operation must be updating zs_minor (from -1 to the new
* value).
*/
if (newzs) {
zs->zs_minor = minor;
smp_wmb();
zsprev->zs_next = zs;
} else {
smp_wmb();
zs->zs_minor = minor;
}
return (0);
}
static void
zfsdev_state_destroy(struct file *filp)
zfsdev_state_t *
zfsdev_private_get_state(void *priv)
{
zfsdev_state_t *zs = filp->private_data;
struct file *filp = priv;
ASSERT(zs != NULL);
ASSERT3S(zs->zs_minor, >, 0);
zfs_onexit_destroy(zs->zs_onexit);
zfs_zevent_destroy(zs->zs_zevent);
zs->zs_onexit = NULL;
zs->zs_zevent = NULL;
membar_producer();
zs->zs_minor = -1;
return (filp->private_data);
}
static int

View File

@@ -7378,7 +7378,7 @@ zfsdev_get_state(minor_t minor, enum zfsdev_state_type which)
* Find a free minor number. The zfsdev_state_list is expected to
* be short since it is only a list of currently open file handles.
*/
minor_t
static minor_t
zfsdev_minor_alloc(void)
{
static minor_t last_minor = 0;
@@ -7398,6 +7398,79 @@ zfsdev_minor_alloc(void)
return (0);
}
int
zfsdev_state_init(void *priv)
{
zfsdev_state_t *zs, *zsprev = NULL;
minor_t minor;
boolean_t newzs = B_FALSE;
ASSERT(MUTEX_HELD(&zfsdev_state_lock));
minor = zfsdev_minor_alloc();
if (minor == 0)
return (SET_ERROR(ENXIO));
for (zs = zfsdev_state_list; zs != NULL; zs = zs->zs_next) {
if (zs->zs_minor == -1)
break;
zsprev = zs;
}
if (!zs) {
zs = kmem_zalloc(sizeof (zfsdev_state_t), KM_SLEEP);
newzs = B_TRUE;
}
zfsdev_private_set_state(priv, zs);
zfs_onexit_init((zfs_onexit_t **)&zs->zs_onexit);
zfs_zevent_init((zfs_zevent_t **)&zs->zs_zevent);
/*
* In order to provide for lock-free concurrent read access
* to the minor list in zfsdev_get_state(), new entries
* must be completely written before linking them into the
* list whereas existing entries are already linked; the last
* operation must be updating zs_minor (from -1 to the new
* value).
*/
if (newzs) {
zs->zs_minor = minor;
membar_producer();
zsprev->zs_next = zs;
} else {
membar_producer();
zs->zs_minor = minor;
}
return (0);
}
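
The block comment above is the load-bearing part of the move: zfsdev_get_state() scans the list without a lock, so the writer must order its stores. As a hedged illustration only, here is a small userspace model of that publish order, with C11 fences standing in for membar_producer() and made-up state_t/state_head/state_publish names in place of the real structures:

/* Hypothetical userspace model of the publish order; not OpenZFS code. */
#include <stdatomic.h>
#include <stdlib.h>

typedef struct state {
	struct state *next;	/* entries are appended, never unlinked */
	int minor;		/* -1 marks an invalidated, reusable slot */
	void *payload;		/* stands in for zs_onexit/zs_zevent */
} state_t;

static state_t state_head;	/* anchor; writers serialize on a lock (not shown) */

static int
state_publish(int minor, void *payload)
{
	state_t *zs, *prev = NULL;
	int newzs = 0;

	for (zs = &state_head; zs != NULL; zs = zs->next) {
		if (zs->minor == -1)
			break;		/* reuse an invalidated entry */
		prev = zs;
	}
	if (zs == NULL) {
		if ((zs = calloc(1, sizeof (*zs))) == NULL)
			return (-1);
		newzs = 1;
	}
	zs->payload = payload;		/* fully initialize first */
	if (newzs) {
		zs->minor = minor;	/* safe: not reachable yet */
		atomic_thread_fence(memory_order_release);
		prev->next = zs;	/* link in the finished entry last */
	} else {
		atomic_thread_fence(memory_order_release);
		zs->minor = minor;	/* the store lock-free readers key on */
	}
	return (0);
}

A lock-free reader that observes the new minor (or the new next pointer) is therefore guaranteed to see a fully initialized entry.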
void
zfsdev_state_destroy(void *priv)
{
zfsdev_state_t *zs = zfsdev_private_get_state(priv);
ASSERT(zs != NULL);
ASSERT3S(zs->zs_minor, >, 0);
/*
* The last reference to this zfsdev file descriptor is being dropped.
* We don't have to worry about lookup grabbing this state object, and
* zfsdev_state_init() will not try to reuse this object until it is
* invalidated by setting zs_minor to -1. Invalidation must be done
* last, with a memory barrier to ensure ordering. This lets us avoid
* taking the global zfsdev state lock around destruction.
*/
zfs_onexit_destroy(zs->zs_onexit);
zfs_zevent_destroy(zs->zs_zevent);
zs->zs_onexit = NULL;
zs->zs_zevent = NULL;
membar_producer();
zs->zs_minor = -1;
}
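
Continuing the same hypothetical model (reusing state_t and state_head from the sketch above), the teardown and the lock-free reader look like this; because the minor is invalidated last, behind a fence, a writer that later reuses the slot never sees it half torn down, mirroring the ordering described in the comment above:

/* Mirrors the invalidation order of zfsdev_state_destroy(); model only. */
static void
state_invalidate(state_t *zs)
{
	zs->payload = NULL;			/* tear down the contents first */
	atomic_thread_fence(memory_order_release);
	zs->minor = -1;				/* last: mark the slot reusable */
}

/* Lock-free scan keyed on the minor, in the spirit of zfsdev_get_state(). */
static void *
state_lookup(int minor)
{
	state_t *zs;

	for (zs = &state_head; zs != NULL; zs = zs->next) {
		if (zs->minor == minor) {
			/* Pairs with the release fences on the writer side. */
			atomic_thread_fence(memory_order_acquire);
			return (zs->payload);
		}
	}
	return (NULL);
}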
long
zfsdev_ioctl_common(uint_t vecnum, zfs_cmd_t *zc, int flag)
{