/*
 * This file is part of the SPL: Solaris Porting Layer.
 *
 * Copyright (c) 2009 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory
 * Written by:
 *         Brian Behlendorf <behlendorf1@llnl.gov>,
 *         Herb Wartens <wartens2@llnl.gov>,
 *         Jim Garlick <garlick@llnl.gov>
 * UCRL-CODE-235197
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _SPL_MUTEX_H
#define _SPL_MUTEX_H

#include <sys/types.h>
#include <linux/mutex.h>
|
|
|
|
|
|
|
|
/*
 * Solaris mutex types accepted by mutex_init().  On Linux only
 * MUTEX_DEFAULT is honored; the other types exist for source
 * compatibility and mutex_init() asserts against them.
 */
typedef enum {
	MUTEX_DEFAULT	= 0,
	MUTEX_SPIN	= 1,
	MUTEX_ADAPTIVE	= 2
} kmutex_type_t;
|
2008-02-27 19:09:51 +00:00
|
|
|
|
2009-09-25 21:47:01 +00:00
|
|
|
#ifdef HAVE_MUTEX_OWNER

/*
 * When the kernel's struct mutex already tracks its owner we can use
 * it directly as the kmutex_t and avoid carrying a shadow owner field.
 */
typedef struct mutex kmutex_t;

/*
 * Return the task currently holding the mutex, or NULL when unheld.
 *
 * Fix: the previous code read mp->owner twice ("if (mp->owner)
 * return (mp->owner)->task;").  A concurrent mutex_exit() between the
 * check and the dereference could clear the owner and cause a NULL
 * pointer dereference.  Read the owner pointer exactly once instead.
 */
static inline kthread_t *
mutex_owner(kmutex_t *mp)
{
	/* Single snapshot; mp->owner may be cleared at any time by unlock. */
	__typeof__(mp->owner) owner = mp->owner;

	if (owner)
		return owner->task;

	return NULL;
}
|
|
|
|
/* Holder predicates built on the native owner tracking. */
#define mutex_owned(mp)		(mutex_owner(mp) == current)
#define MUTEX_HELD(mp)		mutex_owned(mp)

/*
 * mutex_init() must expand at the call site so every lock site gets
 * its own static lock_class_key for the kernel lock validator.  Only
 * MUTEX_DEFAULT locks are supported under Linux.
 */
#undef mutex_init
#define mutex_init(mp, name, type, ibc)				\
({								\
	static struct lock_class_key __key;			\
	ASSERT(type == MUTEX_DEFAULT);				\
								\
	__mutex_init((mp), #mp, &__key);			\
})

/* #define mutex_destroy(mp)		((void)0) */

/* Lock operations map one-to-one onto the native mutex API. */
#define mutex_tryenter(mp)	mutex_trylock(mp)
#define mutex_enter(mp)		mutex_lock(mp)
#define mutex_exit(mp)		mutex_unlock(mp)
|
|
|
|
|
|
|
|
#else /* HAVE_MUTEX_OWNER */
|
2008-02-27 19:09:51 +00:00
|
|
|
|
2008-02-26 20:36:04 +00:00
|
|
|
typedef struct {
|
2009-09-25 21:47:01 +00:00
|
|
|
struct mutex m_mutex;
|
|
|
|
kthread_t *m_owner;
|
2008-02-26 20:36:04 +00:00
|
|
|
} kmutex_t;
|
|
|
|
|
2009-09-25 21:47:01 +00:00
|
|
|
/*
 * Adaptive spinning is only useful when we can ask whether the owner
 * is on-cpu (task_curr()); otherwise both checks degrade to 0 and
 * mutex_enter() falls straight through to a sleeping mutex_lock().
 */
#ifdef HAVE_TASK_CURR
extern int spl_mutex_spin_max(void);
#else /* HAVE_TASK_CURR */
# define task_curr(owner)		0
# define spl_mutex_spin_max()		0
#endif /* HAVE_TASK_CURR */

/* Access the embedded native mutex (valid because m_mutex is first). */
#define MUTEX(mp)			((struct mutex *)(mp))
|
2008-04-15 20:53:36 +00:00
|
|
|
|
2009-09-25 21:47:01 +00:00
|
|
|
static inline kthread_t *
|
|
|
|
spl_mutex_get_owner(kmutex_t *mp)
|
|
|
|
{
|
|
|
|
return mp->m_owner;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
spl_mutex_set_owner(kmutex_t *mp)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&MUTEX(mp)->wait_lock, flags);
|
|
|
|
mp->m_owner = current;
|
|
|
|
spin_unlock_irqrestore(&MUTEX(mp)->wait_lock, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
spl_mutex_clear_owner(kmutex_t *mp)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&MUTEX(mp)->wait_lock, flags);
|
|
|
|
mp->m_owner = NULL;
|
|
|
|
spin_unlock_irqrestore(&MUTEX(mp)->wait_lock, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline kthread_t *
|
|
|
|
mutex_owner(kmutex_t *mp)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
kthread_t *owner;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&MUTEX(mp)->wait_lock, flags);
|
|
|
|
owner = spl_mutex_get_owner(mp);
|
|
|
|
spin_unlock_irqrestore(&MUTEX(mp)->wait_lock, flags);
|
2008-04-15 20:53:36 +00:00
|
|
|
|
2009-09-25 21:47:01 +00:00
|
|
|
return owner;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Holder predicates built on the shadow owner. */
#define mutex_owned(mp)		(mutex_owner(mp) == current)
#define MUTEX_HELD(mp)		mutex_owned(mp)
|
2008-02-26 20:36:04 +00:00
|
|
|
|
2009-09-25 21:47:01 +00:00
|
|
|
/*
 * The following operations must be #defines and not static inlines.
 * This ensures the native Linux mutex calls (lock/unlock) are located
 * in the user's code, which matters for the built-in kernel lock
 * analysis tools.
 */
#undef mutex_init
#define mutex_init(mp, name, type, ibc)				\
({								\
	static struct lock_class_key __key;			\
	ASSERT(type == MUTEX_DEFAULT);				\
								\
	__mutex_init(MUTEX(mp), #mp, &__key);			\
	spl_mutex_clear_owner(mp);				\
})
|
|
|
|
|
2008-05-05 20:18:49 +00:00
|
|
|
/* Destroying a held mutex is a caller bug; catch it loudly. */
#undef mutex_destroy
#define mutex_destroy(mp)					\
({								\
	VERIFY(!MUTEX_HELD(mp));				\
})
|
2008-04-15 20:53:36 +00:00
|
|
|
|
2009-09-25 21:47:01 +00:00
|
|
|
/*
 * Non-blocking acquire: returns 1 and records ownership on success,
 * 0 if the lock is already held.
 */
#define mutex_tryenter(mp)					\
({								\
	int _rc_;						\
								\
	if ((_rc_ = mutex_trylock(MUTEX(mp))) == 1)		\
		spl_mutex_set_owner(mp);			\
								\
	_rc_;							\
})
|
2008-02-26 20:36:04 +00:00
|
|
|
|
2009-09-25 21:47:01 +00:00
|
|
|
/*
 * Adaptive mutexes assume that the lock may be held by a task running
 * on a different cpu.  The expectation is that the task will drop the
 * lock before leaving the head of the run queue.  So the ideal thing
 * to do is spin until we acquire the lock and avoid a context switch.
 * However it is also possible the task holding the lock yields the
 * processor without dropping the lock.  In this case, we know it's
 * going to be a while so we stop spinning and go to sleep waiting for
 * the lock to be available.  This should strike the optimum balance
 * between spinning and sleeping waiting for a lock.
 */
#define mutex_enter(mp)						\
({								\
	kthread_t *_owner_;					\
	int _rc_, _count_;					\
								\
	_rc_ = 0;						\
	_count_ = 0;						\
	_owner_ = mutex_owner(mp);				\
								\
	/* Spin while the holder is on-cpu, up to the tunable cap. */	\
	while (_owner_ && task_curr(_owner_) &&			\
	       _count_ <= spl_mutex_spin_max()) {		\
		if ((_rc_ = mutex_trylock(MUTEX(mp))))		\
			break;					\
								\
		_count_++;					\
	}							\
								\
	/* Spin gave up (or never ran); block until acquired. */	\
	if (!_rc_)						\
		mutex_lock(MUTEX(mp));				\
								\
	spl_mutex_set_owner(mp);				\
})
|
|
|
|
|
|
|
|
/* Clear the shadow owner BEFORE the unlock wakes any waiter. */
#define mutex_exit(mp)						\
({								\
	spl_mutex_clear_owner(mp);				\
	mutex_unlock(MUTEX(mp));				\
})
|
|
|
|
|
|
|
|
#endif /* HAVE_MUTEX_OWNER */
|
|
|
|
|
|
|
|
/* Module load/unload hooks for the SPL mutex subsystem. */
int spl_mutex_init(void);
void spl_mutex_fini(void);
|
|
|
|
|
|
|
|
#endif /* _SPL_MUTEX_H */
|