2015-02-25 17:20:38 +00:00
|
|
|
/*
|
2010-05-17 22:18:00 +00:00
|
|
|
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
|
|
|
|
* Copyright (C) 2007 The Regents of the University of California.
|
|
|
|
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
|
|
|
|
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
|
2008-05-26 04:38:26 +00:00
|
|
|
* UCRL-CODE-235197
|
|
|
|
*
|
2010-05-17 22:18:00 +00:00
|
|
|
* This file is part of the SPL, Solaris Porting Layer.
|
2013-03-05 01:26:55 +00:00
|
|
|
* For details, see <http://zfsonlinux.org/>.
|
2010-05-17 22:18:00 +00:00
|
|
|
*
|
|
|
|
* The SPL is free software; you can redistribute it and/or modify it
|
|
|
|
* under the terms of the GNU General Public License as published by the
|
|
|
|
* Free Software Foundation; either version 2 of the License, or (at your
|
|
|
|
* option) any later version.
|
2008-05-26 04:38:26 +00:00
|
|
|
*
|
2010-05-17 22:18:00 +00:00
|
|
|
* The SPL is distributed in the hope that it will be useful, but WITHOUT
|
2008-05-26 04:38:26 +00:00
|
|
|
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
|
|
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
|
|
* for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License along
|
2010-05-17 22:18:00 +00:00
|
|
|
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
|
2015-02-25 17:20:38 +00:00
|
|
|
*/
|
2008-05-26 04:38:26 +00:00
|
|
|
|
2008-02-28 00:52:31 +00:00
|
|
|
#ifndef _SPL_MUTEX_H
|
2015-02-25 17:20:38 +00:00
|
|
|
#define _SPL_MUTEX_H
|
2008-02-26 20:36:04 +00:00
|
|
|
|
2008-03-01 00:45:59 +00:00
|
|
|
#include <sys/types.h>
|
2009-09-25 21:47:01 +00:00
|
|
|
#include <linux/mutex.h>
|
2010-07-20 20:47:37 +00:00
|
|
|
#include <linux/compiler_compat.h>
|
2015-10-06 21:01:46 +00:00
|
|
|
#include <linux/lockdep.h>
|
2009-09-25 21:47:01 +00:00
|
|
|
|
|
|
|
/*
 * Solaris-compatible mutex types.  mutex_init() only accepts
 * MUTEX_DEFAULT and MUTEX_NOLOCKDEP (see its ASSERT); SPIN and
 * ADAPTIVE presumably exist only for source compatibility — confirm.
 */
typedef enum {
	MUTEX_DEFAULT = 0,	/* standard mutex, lockdep tracked */
	MUTEX_SPIN = 1,		/* not supported by mutex_init() */
	MUTEX_ADAPTIVE = 2,	/* not supported by mutex_init() */
	MUTEX_NOLOCKDEP = 3	/* mutex excluded from lockdep analysis */
} kmutex_type_t;
|
2008-02-27 19:09:51 +00:00
|
|
|
|
2008-02-26 20:36:04 +00:00
|
|
|
/*
 * SPL mutex: wraps the native Linux mutex, plus a spinlock used to
 * make mutex_exit() safe for freeing the enclosing object (see the
 * comment above mutex_exit below).
 */
typedef struct {
	struct mutex m_mutex;	/* underlying native Linux mutex */
	spinlock_t m_lock;	/* used for serializing mutex_exit */
#ifndef HAVE_MUTEX_OWNER
	/* only when kernel doesn't have owner */
	kthread_t *m_owner;	/* holder, maintained by spl_mutex_*_owner() */
#endif
#ifdef CONFIG_LOCKDEP
	kmutex_type_t m_type;	/* recorded so NOLOCKDEP can bypass lockdep */
#endif /* CONFIG_LOCKDEP */
} kmutex_t;
|
|
|
|
|
2015-02-25 17:20:38 +00:00
|
|
|
/* Access the embedded native mutex. */
#define MUTEX(mp)		(&((mp)->m_mutex))

/* Record the current task as owner after a successful acquisition. */
static inline void
spl_mutex_set_owner(kmutex_t *mp)
{
	/*
	 * kernel will handle its owner, so we don't need to do anything if it
	 * is defined.
	 */
#ifndef HAVE_MUTEX_OWNER
	mp->m_owner = current;
#endif
}
|
|
|
|
|
|
|
|
/* Clear the recorded owner; called just before releasing the mutex. */
static inline void
spl_mutex_clear_owner(kmutex_t *mp)
{
#ifndef HAVE_MUTEX_OWNER
	mp->m_owner = NULL;
#endif
}
|
|
|
|
|
2017-08-02 19:07:11 +00:00
|
|
|
/*
 * Current holder of the mutex, or NULL.  Read from the kernel's own
 * owner field when available, otherwise from our shadow m_owner.
 */
#ifdef HAVE_MUTEX_OWNER
#define mutex_owner(mp)		(ACCESS_ONCE(MUTEX(mp)->owner))
#else
#define mutex_owner(mp)		(ACCESS_ONCE((mp)->m_owner))
#endif
/* Non-zero when the calling task holds the mutex. */
#define mutex_owned(mp)		(mutex_owner(mp) == current)
#define MUTEX_HELD(mp)		mutex_owned(mp)
#define MUTEX_NOT_HELD(mp)	(!MUTEX_HELD(mp))
|
2008-02-26 20:36:04 +00:00
|
|
|
|
2015-10-06 21:01:46 +00:00
|
|
|
#ifdef CONFIG_LOCKDEP
/*
 * Remember the requested mutex type so the lockdep helpers below can
 * tell whether this mutex opted out of lockdep (MUTEX_NOLOCKDEP).
 */
static inline void
spl_mutex_set_type(kmutex_t *mp, kmutex_type_t type)
{
	mp->m_type = type;
}

/*
 * Suspend lockdep checking for the current task while operating on a
 * MUTEX_NOLOCKDEP mutex.  A NULL mp is tolerated and ignored.
 *
 * Note: these are real functions, so no line-continuation backslashes
 * (the originals carried stray '\' left over from a macro conversion).
 */
static inline void
spl_mutex_lockdep_off_maybe(kmutex_t *mp)
{
	if (mp && mp->m_type == MUTEX_NOLOCKDEP)
		lockdep_off();
}

/* Re-enable lockdep checking disabled by spl_mutex_lockdep_off_maybe(). */
static inline void
spl_mutex_lockdep_on_maybe(kmutex_t *mp)
{
	if (mp && mp->m_type == MUTEX_NOLOCKDEP)
		lockdep_on();
}
#else  /* CONFIG_LOCKDEP */
#define spl_mutex_set_type(mp, type)
#define spl_mutex_lockdep_off_maybe(mp)
#define spl_mutex_lockdep_on_maybe(mp)
#endif /* CONFIG_LOCKDEP */
|
|
|
|
|
2009-09-25 21:47:01 +00:00
|
|
|
/*
 * The following functions must be a #define and not static inline.
 * This ensures that the native Linux mutex functions (lock/unlock)
 * will be correctly located in the user's code, which is important
 * for the built-in kernel lock analysis tools.
 */
|
2008-05-05 20:18:49 +00:00
|
|
|
#undef mutex_init
/*
 * Initialize a kmutex_t.  "name" labels the lock class for lockdep
 * (falls back to the stringified mp expression); "ibc" is unused and
 * kept for Solaris API compatibility.  Only MUTEX_DEFAULT and
 * MUTEX_NOLOCKDEP are supported.
 *
 * Wrapped in do/while(0) so the macro expands to a single statement
 * and is safe in un-braced if/else bodies (the bare-brace form was
 * vulnerable to the classic dangling-else hazard).
 */
#define mutex_init(mp, name, type, ibc)				\
do {								\
	static struct lock_class_key __key;			\
	ASSERT(type == MUTEX_DEFAULT || type == MUTEX_NOLOCKDEP); \
								\
	__mutex_init(MUTEX(mp), (name) ? (#name) : (#mp), &__key); \
	spin_lock_init(&(mp)->m_lock);				\
	spl_mutex_clear_owner(mp);				\
	spl_mutex_set_type(mp, type);				\
} while (0)
|
2009-09-25 21:47:01 +00:00
|
|
|
|
2008-05-05 20:18:49 +00:00
|
|
|
#undef mutex_destroy
/*
 * Destroy a kmutex_t.  The native Linux mutex needs no teardown, so
 * this only verifies the mutex is not currently held.  do/while(0)
 * makes the multi-statement macro a single statement (dangling-else
 * safe), unlike the original bare-brace form.
 */
#define mutex_destroy(mp)					\
do {								\
	VERIFY3P(mutex_owner(mp), ==, NULL);			\
} while (0)
|
2008-04-15 20:53:36 +00:00
|
|
|
|
2015-02-25 17:20:38 +00:00
|
|
|
/*
 * Attempt to acquire the mutex without blocking.  Evaluates to 1 on
 * success (owner recorded), 0 if the mutex is already held.  Lockdep
 * is bypassed around the attempt for MUTEX_NOLOCKDEP mutexes.
 * Implemented as a GCC statement expression so it yields a value.
 */
#define mutex_tryenter(mp)					\
({								\
	int _rc_;						\
								\
	spl_mutex_lockdep_off_maybe(mp);			\
	if ((_rc_ = mutex_trylock(MUTEX(mp))) == 1)		\
		spl_mutex_set_owner(mp);			\
	spl_mutex_lockdep_on_maybe(mp);				\
								\
	_rc_;							\
})
|
2008-02-26 20:36:04 +00:00
|
|
|
|
2015-03-20 19:03:26 +00:00
|
|
|
/*
 * Acquire the mutex, sleeping until available.  Recursive entry is
 * forbidden (asserted).  With CONFIG_DEBUG_LOCK_ALLOC the subclass is
 * forwarded to lockdep via mutex_lock_nested(); otherwise it is
 * ignored.  Both variants are wrapped in do/while(0) so the macro is
 * a single statement and safe in un-braced if/else bodies (the
 * original bare-brace form was not).
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define mutex_enter_nested(mp, subclass)			\
do {								\
	ASSERT3P(mutex_owner(mp), !=, current);			\
	spl_mutex_lockdep_off_maybe(mp);			\
	mutex_lock_nested(MUTEX(mp), (subclass));		\
	spl_mutex_lockdep_on_maybe(mp);				\
	spl_mutex_set_owner(mp);				\
} while (0)
#else /* CONFIG_DEBUG_LOCK_ALLOC */
#define mutex_enter_nested(mp, subclass)			\
do {								\
	ASSERT3P(mutex_owner(mp), !=, current);			\
	spl_mutex_lockdep_off_maybe(mp);			\
	mutex_lock(MUTEX(mp));					\
	spl_mutex_lockdep_on_maybe(mp);				\
	spl_mutex_set_owner(mp);				\
} while (0)
#endif /* CONFIG_DEBUG_LOCK_ALLOC */

#define mutex_enter(mp) mutex_enter_nested((mp), 0)
|
2009-09-25 21:47:01 +00:00
|
|
|
|
2015-02-25 18:23:49 +00:00
|
|
|
/*
|
|
|
|
* The reason for the spinlock:
|
|
|
|
*
|
|
|
|
* The Linux mutex is designed with a fast-path/slow-path design such that it
|
|
|
|
* does not guarantee serialization upon itself, allowing a race where latter
|
|
|
|
* acquirers finish mutex_unlock before former ones.
|
|
|
|
*
|
|
|
|
* The race renders it unsafe to be used for serializing the freeing of an
|
|
|
|
* object in which the mutex is embedded, where the latter acquirer could go
|
|
|
|
* on to free the object while the former one is still doing mutex_unlock and
|
|
|
|
* causing memory corruption.
|
|
|
|
*
|
|
|
|
* However, there are many places in ZFS where the mutex is used for
|
|
|
|
* serializing object freeing, and the code is shared among other OSes without
|
|
|
|
* this issue. Thus, we need the spinlock to force the serialization on
|
|
|
|
* mutex_exit().
|
|
|
|
*
|
|
|
|
* See http://lwn.net/Articles/575477/ for the information about the race.
|
|
|
|
*/
|
2015-02-25 17:20:38 +00:00
|
|
|
/*
 * Release the mutex.  m_lock serializes the unlock against concurrent
 * acquirers so the enclosing object may be freed by the next holder
 * (see the race description above).  The owner is cleared before
 * mutex_unlock() so no window exists where the mutex is free but
 * still appears owned.  do/while(0) makes the macro a single
 * statement (dangling-else safe), unlike the original bare braces.
 */
#define mutex_exit(mp)						\
do {								\
	spl_mutex_lockdep_off_maybe(mp);			\
	spin_lock(&(mp)->m_lock);				\
	spl_mutex_clear_owner(mp);				\
	mutex_unlock(MUTEX(mp));				\
	spin_unlock(&(mp)->m_lock);				\
	spl_mutex_lockdep_on_maybe(mp);				\
} while (0)
|
2009-09-25 21:47:01 +00:00
|
|
|
|
|
|
|
/* Subsystem-wide setup/teardown entry points, defined elsewhere. */
int spl_mutex_init(void);
void spl_mutex_fini(void);
|
|
|
|
|
|
|
|
#endif /* _SPL_MUTEX_H */
|