Fix z_xattr_lock/z_teardown_lock lock inversion
There exists a lock-ordering inversion between z_xattr_lock and z_teardown_lock. Detect this case and return EBUSY, so that zfs_resume_fs() will mark the inode stale and it can be safely revalidated on the next access.

The two conflicting acquisition orders are:

* process 1:
    zpl_xattr_get          -> takes zp->z_xattr_lock
      __zpl_xattr_get
        zfs_lookup         -> takes zsb->z_teardown_lock (in the ZFS_ENTER macro)

* process 2:
    zfs_ioc_recv           -> takes zsb->z_teardown_lock (in zfs_suspend_fs())
      zfs_resume_fs
        zfs_rezget         -> takes zp->z_xattr_lock

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Chunwei Chen <david.chen@osnexus.com>
Closes #3969
This commit is contained in:
Parent: 2727b9d3b6
Commit: a8ad3bf02c
@@ -1012,7 +1012,16 @@ zfs_rezget(znode_t *zp)
 	}

 	mutex_exit(&zp->z_acl_lock);
-	rw_enter(&zp->z_xattr_lock, RW_WRITER);
+
+	/*
+	 * Lock inversion with zpl_xattr_get->__zpl_xattr_get->zfs_lookup
+	 * between z_xattr_lock and z_teardown_lock. Detect this case and
+	 * return EBUSY so zfs_resume_fs() will mark the inode stale and it
+	 * will safely be revalidated on next access.
+	 */
+	err = rw_tryenter(&zp->z_xattr_lock, RW_WRITER);
+	if (!err)
+		return (SET_ERROR(EBUSY));
+
 	if (zp->z_xattr_cached) {
 		nvlist_free(zp->z_xattr_cached);
 		zp->z_xattr_cached = NULL;
Loading…
Reference in New Issue