- Add some spinlocks to cover all the private data in the mutex.  I don't
  think this should fix anything, but it's a good idea regardless.
- Drop the lock before calling the constructor/destructor for the slab;
  otherwise we can't sleep in a constructor/destructor, and long-running
  functions may trigger the NMI watchdog.
- Do something braindead, but safe, for the console debug logs for now.

git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@73 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c
commit d61e12af5a (parent c5fd77fcbf)
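All three changes follow the same locking rule: a spinlock may cover a structure's private fields, but it must never be held across anything that can sleep, such as down() on a semaphore, a slab constructor/destructor, or any long-running callback (spinning that long can also trip the NMI watchdog). A minimal sketch of that pattern, using hypothetical names rather than SPL code:

#include <linux/spinlock.h>
#include <linux/semaphore.h>    /* older kernels: <asm/semaphore.h> */
#include <linux/sched.h>        /* for current */

struct guarded {
        spinlock_t       lock;  /* covers the private fields below */
        struct semaphore sem;   /* acquiring this may sleep */
        void            *owner; /* example of spinlock-protected data */
};

static void guarded_acquire(struct guarded *g)
{
        spin_lock(&g->lock);
        /* ... inspect/validate the private fields here ... */
        spin_unlock(&g->lock);  /* never sleep with the spinlock held */

        down(&g->sem);          /* may sleep */

        spin_lock(&g->lock);
        g->owner = current;     /* record the result under the lock */
        spin_unlock(&g->lock);
}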
@@ -29,16 +29,19 @@ typedef struct {
         char *km_name;
         struct task_struct *km_owner;
         struct semaphore km_sem;
+        spinlock_t km_lock;
 } kmutex_t;
 
 #undef mutex_init
 static __inline__ void
 mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
 {
+        BUG_ON(mp == NULL);
         BUG_ON(ibc != NULL);            /* XXX - Spin mutexes not needed? */
         BUG_ON(type != MUTEX_DEFAULT);  /* XXX - Only default type supported? */
 
         mp->km_magic = KM_MAGIC;
+        spin_lock_init(&mp->km_lock);
         sema_init(&mp->km_sem, 1);
         mp->km_owner = NULL;
         mp->km_name = NULL;
@@ -54,49 +57,65 @@ mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
 static __inline__ void
 mutex_destroy(kmutex_t *mp)
 {
+        BUG_ON(mp == NULL);
+        spin_lock(&mp->km_lock);
         BUG_ON(mp->km_magic != KM_MAGIC);
 
         if (mp->km_name)
                 kfree(mp->km_name);
 
         memset(mp, KM_POISON, sizeof(*mp));
+        spin_unlock(&mp->km_lock);
 }
 
 static __inline__ void
 mutex_enter(kmutex_t *mp)
 {
+        BUG_ON(mp == NULL);
+        spin_lock(&mp->km_lock);
         BUG_ON(mp->km_magic != KM_MAGIC);
 
         if (unlikely(in_atomic() && !current->exit_state)) {
                 printk("May schedule while atomic: %s/0x%08x/%d\n",
                        current->comm, preempt_count(), current->pid);
+                spin_unlock(&mp->km_lock);
                 BUG();
         }
 
-        down(&mp->km_sem);      /* Will check in_atomic() for us */
+        spin_unlock(&mp->km_lock);
+
+        down(&mp->km_sem);
+
+        spin_lock(&mp->km_lock);
         BUG_ON(mp->km_owner != NULL);
         mp->km_owner = current;
+        spin_unlock(&mp->km_lock);
 }
 
-/* Return 1 if we acquired the mutex, else zero.
- */
+/* Return 1 if we acquired the mutex, else zero. */
 static __inline__ int
 mutex_tryenter(kmutex_t *mp)
 {
-        int result;
+        int rc;
 
+        BUG_ON(mp == NULL);
+        spin_lock(&mp->km_lock);
         BUG_ON(mp->km_magic != KM_MAGIC);
 
         if (unlikely(in_atomic() && !current->exit_state)) {
                 printk("May schedule while atomic: %s/0x%08x/%d\n",
                        current->comm, preempt_count(), current->pid);
+                spin_unlock(&mp->km_lock);
                 BUG();
         }
 
-        result = down_trylock(&mp->km_sem); /* returns 0 if acquired */
-        if (result == 0) {
+        spin_unlock(&mp->km_lock);
+        rc = down_trylock(&mp->km_sem); /* returns 0 if acquired */
+        if (rc == 0) {
+                spin_lock(&mp->km_lock);
                 BUG_ON(mp->km_owner != NULL);
                 mp->km_owner = current;
+                spin_unlock(&mp->km_lock);
                 return 1;
         }
         return 0;
@@ -105,28 +124,43 @@ mutex_tryenter(kmutex_t *mp)
 static __inline__ void
 mutex_exit(kmutex_t *mp)
 {
+        BUG_ON(mp == NULL);
+        spin_lock(&mp->km_lock);
         BUG_ON(mp->km_magic != KM_MAGIC);
         BUG_ON(mp->km_owner != current);
         mp->km_owner = NULL;
+        spin_unlock(&mp->km_lock);
         up(&mp->km_sem);
 }
 
-/* Return 1 if mutex is held by current process, else zero.
- */
+/* Return 1 if mutex is held by current process, else zero. */
 static __inline__ int
 mutex_owned(kmutex_t *mp)
 {
+        int rc;
+
+        BUG_ON(mp == NULL);
+        spin_lock(&mp->km_lock);
         BUG_ON(mp->km_magic != KM_MAGIC);
-        return (mp->km_owner == current);
+        rc = (mp->km_owner == current);
+        spin_unlock(&mp->km_lock);
+
+        return rc;
 }
 
-/* Return owner if mutex is owned, else NULL.
- */
+/* Return owner if mutex is owned, else NULL. */
 static __inline__ kthread_t *
 mutex_owner(kmutex_t *mp)
 {
+        kthread_t *thr;
+
+        BUG_ON(mp == NULL);
+        spin_lock(&mp->km_lock);
         BUG_ON(mp->km_magic != KM_MAGIC);
-        return mp->km_owner;
+        thr = mp->km_owner;
+        spin_unlock(&mp->km_lock);
+
+        return thr;
 }
 
 #ifdef __cplusplus
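For context, a consumer of this Solaris-style API would use the calls roughly as below; the list structure and names are hypothetical, only the kmutex_t interface comes from the header changed above.

/* Hypothetical caller of the kmutex_t API; my_node_t/my_list_t are
 * illustrative and not part of the SPL. */
typedef struct my_node {
        struct my_node *mn_next;
} my_node_t;

typedef struct my_list {
        kmutex_t   ml_lock;
        my_node_t *ml_head;
} my_list_t;

static void
my_list_init(my_list_t *ml)
{
        mutex_init(&ml->ml_lock, "my_list_lock", MUTEX_DEFAULT, NULL);
        ml->ml_head = NULL;
}

static void
my_list_push(my_list_t *ml, my_node_t *node)
{
        mutex_enter(&ml->ml_lock);      /* may sleep; not for atomic context */
        BUG_ON(!mutex_owned(&ml->ml_lock));
        node->mn_next = ml->ml_head;
        ml->ml_head = node;
        mutex_exit(&ml->ml_lock);
}

static void
my_list_fini(my_list_t *ml)
{
        mutex_destroy(&ml->ml_lock);
}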
@@ -10,7 +10,8 @@
 /*
  * Generic support
  */
-static char spl_debug_buffer[MAXMSGLEN];
+static char spl_debug_buffer1[1024];
+static char spl_debug_buffer2[1024];
 static spinlock_t spl_debug_lock = SPIN_LOCK_UNLOCKED;
 
 unsigned long spl_debug_mask = 0;
@@ -83,12 +84,11 @@ EXPORT_SYMBOL(ddi_strtoul);
 void
 __dprintf(const char *file, const char *func, int line, const char *fmt, ...)
 {
-        char *sfp, *start, *ptr;
+        char *sfp;
         struct timeval tv;
         unsigned long flags;
         va_list ap;
 
-        start = ptr = spl_debug_buffer;
         sfp = strrchr(file, '/');
         do_gettimeofday(&tv);
 
@@ -98,18 +98,21 @@ __dprintf(const char *file, const char *func, int line, const char *fmt, ...)
          * reason why we really, really, need an internal debug log.
          */
         spin_lock_irqsave(&spl_debug_lock, flags);
-        ptr += snprintf(ptr, MAXMSGLEN - 1,
-                        "spl: %lu.%06lu:%d:%u:%s:%d:%s(): ",
-                        tv.tv_sec, tv.tv_usec, current->pid,
-                        smp_processor_id(),
-                        sfp == NULL ? file : sfp + 1,
-                        line, func);
+        memset(spl_debug_buffer1, 0, 1024);
+        memset(spl_debug_buffer2, 0, 1024);
+
+        snprintf(spl_debug_buffer1, 1023,
+                 "spl: %lu.%06lu:%d:%u:%s:%d:%s(): ",
+                 tv.tv_sec, tv.tv_usec, current->pid,
+                 smp_processor_id(),
+                 sfp == NULL ? file : sfp + 1,
+                 line, func);
 
         va_start(ap, fmt);
-        ptr += vsnprintf(ptr, MAXMSGLEN - (ptr - start) - 1, fmt, ap);
+        vsnprintf(spl_debug_buffer2, 1023, fmt, ap);
         va_end(ap);
 
-        printk("%s", start);
+        printk("%s%s", spl_debug_buffer1, spl_debug_buffer2);
         spin_unlock_irqrestore(&spl_debug_lock, flags);
 }
 EXPORT_SYMBOL(__dprintf);
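Callers pass the file, function, and line explicitly, which in practice is hidden behind a macro. A hedged sketch of such a wrapper (the macro name is illustrative; only the __dprintf() signature is taken from the diff above):

/* Hypothetical convenience wrapper; the SPL's real debug macro and its
 * gating on spl_debug_mask may differ. */
#define MY_DPRINTF(fmt, ...) \
        __dprintf(__FILE__, __FUNCTION__, __LINE__, fmt, ## __VA_ARGS__)

static void
example_debug(const char *name, int count)
{
        /* Emits one printk() line prefixed with
         * "spl: <sec>.<usec>:<pid>:<cpu>:<file>:<line>:<func>(): " */
        MY_DPRINTF("cache %s grew to %d objects\n", name, count);
}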
@@ -50,7 +50,6 @@ typedef struct kmem_cache_cb {
 
 
 static spinlock_t kmem_cache_cb_lock = SPIN_LOCK_UNLOCKED;
-//static spinlock_t kmem_cache_cb_lock = (spinlock_t) { 1 SPINLOCK_MAGIC_INIT };
 static LIST_HEAD(kmem_cache_cb_list);
 static struct shrinker *kmem_cache_shrinker;
 
@@ -110,17 +109,22 @@ static void
 kmem_cache_generic_constructor(void *ptr, kmem_cache_t *cache, unsigned long flags)
 {
         kmem_cache_cb_t *kcc;
+        kmem_constructor_t constructor;
+        void *private;
 
         spin_lock(&kmem_cache_cb_lock);
 
         /* Callback list must be in sync with linux slab caches */
         kcc = kmem_cache_find_cache_cb(cache);
         BUG_ON(!kcc);
+        constructor = kcc->kcc_constructor;
+        private = kcc->kcc_private;
 
-        if (kcc->kcc_constructor)
-                kcc->kcc_constructor(ptr, kcc->kcc_private, (int)flags);
-
         spin_unlock(&kmem_cache_cb_lock);
 
+        if (constructor)
+                constructor(ptr, private, (int)flags);
+
         /* Linux constructor has no return code, silently eat it */
 }
 
@@ -128,18 +132,22 @@ static void
 kmem_cache_generic_destructor(void *ptr, kmem_cache_t *cache, unsigned long flags)
 {
         kmem_cache_cb_t *kcc;
+        kmem_destructor_t destructor;
+        void *private;
 
         spin_lock(&kmem_cache_cb_lock);
 
         /* Callback list must be in sync with linux slab caches */
         kcc = kmem_cache_find_cache_cb(cache);
         BUG_ON(!kcc);
+        destructor = kcc->kcc_destructor;
+        private = kcc->kcc_private;
 
-        /* Solaris destructor takes no flags, silently eat them */
-        if (kcc->kcc_destructor)
-                kcc->kcc_destructor(ptr, kcc->kcc_private);
-
         spin_unlock(&kmem_cache_cb_lock);
 
+        /* Solaris destructor takes no flags, silently eat them */
+        if (destructor)
+                destructor(ptr, private);
 }
 
 /* XXX - Arguments are ignored */
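To illustrate why these callbacks cannot run under kmem_cache_cb_lock, consider a Solaris-style constructor that blocks; the object type and size below are hypothetical, and the sketch assumes the Solaris-flavoured kmem_zalloc()/KM_SLEEP interface the SPL provides.

/* Hypothetical slab constructor: a KM_SLEEP allocation may block, which
 * is illegal while a spinlock is held, hence the unlock-before-call
 * change above.  my_obj_t and MY_OBJ_BUF_SIZE are illustrative only. */
#define MY_OBJ_BUF_SIZE 4096

typedef struct my_obj {
        kmutex_t  mo_lock;
        void     *mo_buf;
} my_obj_t;

static int
my_obj_constructor(void *buf, void *private, int kmflags)
{
        my_obj_t *obj = buf;

        mutex_init(&obj->mo_lock, "my_obj_lock", MUTEX_DEFAULT, NULL);
        obj->mo_buf = kmem_zalloc(MY_OBJ_BUF_SIZE, KM_SLEEP); /* may sleep */

        return (0);
}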