Add new lock types MUTEX_NOLOCKDEP, and RW_NOLOCKDEP

When running a kernel with CONFIG_LOCKDEP=y, lockdep reports possible
recursive locking in some cases and possible circular locking dependency
in others, within the SPL and ZFS modules.

When lockdep detects these conditions, it disables further lock analysis
for all locks.  This causes /proc/lock_stat not to reflect full
information about lock contention, even in locks without dependency
issues.

This commit creates a new type of mutex, MUTEX_NOLOCKDEP.  This mutex
type causes subsequent attempts to take or release those locks to be
wrapped in lockdep_off() and lockdep_on().

This commit also creates an RW_NOLOCKDEP type analogous to
MUTEX_NOLOCKDEP.

MUTEX_NOLOCKDEP and RW_NOLOCKDEP are also defined in zfs, in a commit to
that repo, for userspace builds.

Signed-off-by: Olaf Faaland <faaland1@llnl.gov>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Issue #480
This commit is contained in:
Olaf Faaland 2015-10-06 14:01:46 -07:00 committed by Brian Behlendorf
parent 61bbbd9a77
commit 692ae8d398
2 changed files with 81 additions and 3 deletions

View File

@ -28,17 +28,22 @@
#include <sys/types.h> #include <sys/types.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/compiler_compat.h> #include <linux/compiler_compat.h>
#include <linux/lockdep.h>
typedef enum { typedef enum {
MUTEX_DEFAULT = 0, MUTEX_DEFAULT = 0,
MUTEX_SPIN = 1, MUTEX_SPIN = 1,
MUTEX_ADAPTIVE = 2 MUTEX_ADAPTIVE = 2,
MUTEX_NOLOCKDEP = 3
} kmutex_type_t; } kmutex_type_t;
typedef struct { typedef struct {
struct mutex m_mutex; struct mutex m_mutex;
spinlock_t m_lock; /* used for serializing mutex_exit */ spinlock_t m_lock; /* used for serializing mutex_exit */
kthread_t *m_owner; kthread_t *m_owner;
#ifdef CONFIG_LOCKDEP
kmutex_type_t m_type;
#endif /* CONFIG_LOCKDEP */
} kmutex_t; } kmutex_t;
#define MUTEX(mp) (&((mp)->m_mutex)) #define MUTEX(mp) (&((mp)->m_mutex))
@ -60,6 +65,30 @@ spl_mutex_clear_owner(kmutex_t *mp)
#define MUTEX_HELD(mp) mutex_owned(mp) #define MUTEX_HELD(mp) mutex_owned(mp)
#define MUTEX_NOT_HELD(mp) (!MUTEX_HELD(mp)) #define MUTEX_NOT_HELD(mp) (!MUTEX_HELD(mp))
#ifdef CONFIG_LOCKDEP
/*
 * Record the type this mutex was initialized with so the lock/unlock
 * paths can decide whether lockdep analysis should be suppressed.
 */
static inline void
spl_mutex_set_type(kmutex_t *mp, kmutex_type_t type)
{
	mp->m_type = type;
}
/*
 * Disable lockdep tracking when operating on a MUTEX_NOLOCKDEP mutex;
 * a no-op for every other mutex type.  NOTE: stray macro-style '\'
 * line continuations from the original draft have been removed -- these
 * are ordinary inline functions, not macros.
 */
static inline void
spl_mutex_lockdep_off_maybe(kmutex_t *mp)
{
	if (mp && mp->m_type == MUTEX_NOLOCKDEP)
		lockdep_off();
}
/* Re-enable lockdep tracking for a MUTEX_NOLOCKDEP mutex; otherwise a no-op. */
static inline void
spl_mutex_lockdep_on_maybe(kmutex_t *mp)
{
	if (mp && mp->m_type == MUTEX_NOLOCKDEP)
		lockdep_on();
}
#else  /* CONFIG_LOCKDEP */
/* Without lockdep there is no state to track; compile to nothing. */
#define spl_mutex_set_type(mp, type)
#define spl_mutex_lockdep_off_maybe(mp)
#define spl_mutex_lockdep_on_maybe(mp)
#endif /* CONFIG_LOCKDEP */
/* /*
* The following functions must be a #define and not static inline. * The following functions must be a #define and not static inline.
* This ensures that the native linux mutex functions (lock/unlock) * This ensures that the native linux mutex functions (lock/unlock)
@ -70,11 +99,12 @@ spl_mutex_clear_owner(kmutex_t *mp)
#define mutex_init(mp, name, type, ibc) \ #define mutex_init(mp, name, type, ibc) \
{ \ { \
static struct lock_class_key __key; \ static struct lock_class_key __key; \
ASSERT(type == MUTEX_DEFAULT); \ ASSERT(type == MUTEX_DEFAULT || type == MUTEX_NOLOCKDEP); \
\ \
__mutex_init(MUTEX(mp), (name) ? (#name) : (#mp), &__key); \ __mutex_init(MUTEX(mp), (name) ? (#name) : (#mp), &__key); \
spin_lock_init(&(mp)->m_lock); \ spin_lock_init(&(mp)->m_lock); \
spl_mutex_clear_owner(mp); \ spl_mutex_clear_owner(mp); \
spl_mutex_set_type(mp, type); \
} }
#undef mutex_destroy #undef mutex_destroy
@ -87,8 +117,10 @@ spl_mutex_clear_owner(kmutex_t *mp)
({ \ ({ \
int _rc_; \ int _rc_; \
\ \
spl_mutex_lockdep_off_maybe(mp); \
if ((_rc_ = mutex_trylock(MUTEX(mp))) == 1) \ if ((_rc_ = mutex_trylock(MUTEX(mp))) == 1) \
spl_mutex_set_owner(mp); \ spl_mutex_set_owner(mp); \
spl_mutex_lockdep_on_maybe(mp); \
\ \
_rc_; \ _rc_; \
}) })
@ -97,14 +129,18 @@ spl_mutex_clear_owner(kmutex_t *mp)
#define mutex_enter_nested(mp, subclass) \ #define mutex_enter_nested(mp, subclass) \
{ \ { \
ASSERT3P(mutex_owner(mp), !=, current); \ ASSERT3P(mutex_owner(mp), !=, current); \
spl_mutex_lockdep_off_maybe(mp); \
mutex_lock_nested(MUTEX(mp), (subclass)); \ mutex_lock_nested(MUTEX(mp), (subclass)); \
spl_mutex_lockdep_on_maybe(mp); \
spl_mutex_set_owner(mp); \ spl_mutex_set_owner(mp); \
} }
#else /* CONFIG_DEBUG_LOCK_ALLOC */ #else /* CONFIG_DEBUG_LOCK_ALLOC */
#define mutex_enter_nested(mp, subclass) \ #define mutex_enter_nested(mp, subclass) \
{ \ { \
ASSERT3P(mutex_owner(mp), !=, current); \ ASSERT3P(mutex_owner(mp), !=, current); \
spl_mutex_lockdep_off_maybe(mp); \
mutex_lock(MUTEX(mp)); \ mutex_lock(MUTEX(mp)); \
spl_mutex_lockdep_on_maybe(mp); \
spl_mutex_set_owner(mp); \ spl_mutex_set_owner(mp); \
} }
#endif /* CONFIG_DEBUG_LOCK_ALLOC */ #endif /* CONFIG_DEBUG_LOCK_ALLOC */
@ -132,10 +168,12 @@ spl_mutex_clear_owner(kmutex_t *mp)
*/ */
#define mutex_exit(mp) \ #define mutex_exit(mp) \
{ \ { \
spl_mutex_lockdep_off_maybe(mp); \
spin_lock(&(mp)->m_lock); \ spin_lock(&(mp)->m_lock); \
spl_mutex_clear_owner(mp); \ spl_mutex_clear_owner(mp); \
mutex_unlock(MUTEX(mp)); \ mutex_unlock(MUTEX(mp)); \
spin_unlock(&(mp)->m_lock); \ spin_unlock(&(mp)->m_lock); \
spl_mutex_lockdep_on_maybe(mp); \
} }
int spl_mutex_init(void); int spl_mutex_init(void);

View File

@ -31,7 +31,8 @@
typedef enum { typedef enum {
RW_DRIVER = 2, RW_DRIVER = 2,
RW_DEFAULT = 4 RW_DEFAULT = 4,
RW_NOLOCKDEP = 5
} krw_type_t; } krw_type_t;
typedef enum { typedef enum {
@ -49,6 +50,9 @@ typedef struct {
#ifndef CONFIG_RWSEM_SPIN_ON_OWNER #ifndef CONFIG_RWSEM_SPIN_ON_OWNER
kthread_t *rw_owner; kthread_t *rw_owner;
#endif #endif
#ifdef CONFIG_LOCKDEP
krw_type_t rw_type;
#endif /* CONFIG_LOCKDEP */
} krwlock_t; } krwlock_t;
#define SEM(rwp) (&(rwp)->rw_rwlock) #define SEM(rwp) (&(rwp)->rw_rwlock)
@ -83,6 +87,30 @@ rw_owner(krwlock_t *rwp)
#endif #endif
} }
#ifdef CONFIG_LOCKDEP
/*
 * Record the type this rwlock was initialized with so the lock/unlock
 * paths can decide whether lockdep analysis should be suppressed.
 */
static inline void
spl_rw_set_type(krwlock_t *rwp, krw_type_t type)
{
	rwp->rw_type = type;
}
/*
 * Disable lockdep tracking when operating on an RW_NOLOCKDEP rwlock;
 * a no-op for every other rwlock type.  NOTE: stray macro-style '\'
 * line continuations from the original draft have been removed -- these
 * are ordinary inline functions, not macros.
 */
static inline void
spl_rw_lockdep_off_maybe(krwlock_t *rwp)
{
	if (rwp && rwp->rw_type == RW_NOLOCKDEP)
		lockdep_off();
}
/* Re-enable lockdep tracking for an RW_NOLOCKDEP rwlock; otherwise a no-op. */
static inline void
spl_rw_lockdep_on_maybe(krwlock_t *rwp)
{
	if (rwp && rwp->rw_type == RW_NOLOCKDEP)
		lockdep_on();
}
#else  /* CONFIG_LOCKDEP */
/* Without lockdep there is no state to track; compile to nothing. */
#define spl_rw_set_type(rwp, type)
#define spl_rw_lockdep_off_maybe(rwp)
#define spl_rw_lockdep_on_maybe(rwp)
#endif /* CONFIG_LOCKDEP */
static inline int static inline int
RW_READ_HELD(krwlock_t *rwp) RW_READ_HELD(krwlock_t *rwp)
{ {
@ -110,9 +138,11 @@ RW_LOCK_HELD(krwlock_t *rwp)
#define rw_init(rwp, name, type, arg) \ #define rw_init(rwp, name, type, arg) \
({ \ ({ \
static struct lock_class_key __key; \ static struct lock_class_key __key; \
ASSERT(type == RW_DEFAULT || type == RW_NOLOCKDEP); \
\ \
__init_rwsem(SEM(rwp), #rwp, &__key); \ __init_rwsem(SEM(rwp), #rwp, &__key); \
spl_rw_clear_owner(rwp); \ spl_rw_clear_owner(rwp); \
spl_rw_set_type(rwp, type); \
}) })
#define rw_destroy(rwp) \ #define rw_destroy(rwp) \
@ -124,6 +154,7 @@ RW_LOCK_HELD(krwlock_t *rwp)
({ \ ({ \
int _rc_ = 0; \ int _rc_ = 0; \
\ \
spl_rw_lockdep_off_maybe(rwp); \
switch (rw) { \ switch (rw) { \
case RW_READER: \ case RW_READER: \
_rc_ = down_read_trylock(SEM(rwp)); \ _rc_ = down_read_trylock(SEM(rwp)); \
@ -135,11 +166,13 @@ RW_LOCK_HELD(krwlock_t *rwp)
default: \ default: \
VERIFY(0); \ VERIFY(0); \
} \ } \
spl_rw_lockdep_on_maybe(rwp); \
_rc_; \ _rc_; \
}) })
#define rw_enter(rwp, rw) \ #define rw_enter(rwp, rw) \
({ \ ({ \
spl_rw_lockdep_off_maybe(rwp); \
switch (rw) { \ switch (rw) { \
case RW_READER: \ case RW_READER: \
down_read(SEM(rwp)); \ down_read(SEM(rwp)); \
@ -151,10 +184,12 @@ RW_LOCK_HELD(krwlock_t *rwp)
default: \ default: \
VERIFY(0); \ VERIFY(0); \
} \ } \
spl_rw_lockdep_on_maybe(rwp); \
}) })
#define rw_exit(rwp) \ #define rw_exit(rwp) \
({ \ ({ \
spl_rw_lockdep_off_maybe(rwp); \
if (RW_WRITE_HELD(rwp)) { \ if (RW_WRITE_HELD(rwp)) { \
spl_rw_clear_owner(rwp); \ spl_rw_clear_owner(rwp); \
up_write(SEM(rwp)); \ up_write(SEM(rwp)); \
@ -162,12 +197,15 @@ RW_LOCK_HELD(krwlock_t *rwp)
ASSERT(RW_READ_HELD(rwp)); \ ASSERT(RW_READ_HELD(rwp)); \
up_read(SEM(rwp)); \ up_read(SEM(rwp)); \
} \ } \
spl_rw_lockdep_on_maybe(rwp); \
}) })
#define rw_downgrade(rwp) \ #define rw_downgrade(rwp) \
({ \ ({ \
spl_rw_lockdep_off_maybe(rwp); \
spl_rw_clear_owner(rwp); \ spl_rw_clear_owner(rwp); \
downgrade_write(SEM(rwp)); \ downgrade_write(SEM(rwp)); \
spl_rw_lockdep_on_maybe(rwp); \
}) })
#if defined(CONFIG_RWSEM_GENERIC_SPINLOCK) #if defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
@ -191,6 +229,7 @@ extern int __down_write_trylock_locked(struct rw_semaphore *);
unsigned long _flags_; \ unsigned long _flags_; \
int _rc_ = 0; \ int _rc_ = 0; \
\ \
spl_rw_lockdep_off_maybe(rwp); \
spl_rwsem_lock_irqsave(&SEM(rwp)->wait_lock, _flags_); \ spl_rwsem_lock_irqsave(&SEM(rwp)->wait_lock, _flags_); \
if ((list_empty(&SEM(rwp)->wait_list)) && \ if ((list_empty(&SEM(rwp)->wait_list)) && \
(SEM(rwp)->activity == 1)) { \ (SEM(rwp)->activity == 1)) { \
@ -199,6 +238,7 @@ extern int __down_write_trylock_locked(struct rw_semaphore *);
(rwp)->rw_owner = current; \ (rwp)->rw_owner = current; \
} \ } \
spl_rwsem_unlock_irqrestore(&SEM(rwp)->wait_lock, _flags_); \ spl_rwsem_unlock_irqrestore(&SEM(rwp)->wait_lock, _flags_); \
spl_rw_lockdep_on_maybe(rwp); \
_rc_; \ _rc_; \
}) })
#else #else