Lots of fixes here:

- Detailed kmem memory allocation tracking.  On SPL module unload we can
  now get a list of all memory allocations which were not freed, along
  with where the original allocation was made (a simplified sketch of the
  idea follows the example output).  E.g.

SPL: 15554:632:(spl-kmem.c:442:kmem_fini()) kmem leaked 90/319332 bytes
SPL: 15554:648:(spl-kmem.c:451:kmem_fini()) address          size  data             func:line
SPL: 15554:648:(spl-kmem.c:457:kmem_fini()) ffff8100734b68b8 32    0100000001005a5a __spl_mutex_init:70
SPL: 15554:648:(spl-kmem.c:457:kmem_fini()) ffff8100734b6148 13    &tl->tl_lock     __spl_mutex_init:74
SPL: 15554:648:(spl-kmem.c:457:kmem_fini()) ffff81007ac43730 32    0100000001005a5a __spl_mutex_init:70
SPL: 15554:648:(spl-kmem.c:457:kmem_fini()) ffff81007ac437d8 13    &tl->tl_lock     __spl_mutex_init:74
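
The tracking idea, boiled down to a minimal userspace sketch (illustrative
only, not the SPL code itself; tracked_alloc(), tracked_free() and
dump_leaks() are hypothetical names -- the real implementation in the diff
below records a kmem_debug_t per allocation in a hash table keyed by
address, plus a global list protected by kmem_lock):

/* Minimal userspace analogue of per-allocation leak tracking. */
#include <stdio.h>
#include <stdlib.h>

struct alloc_rec {
        struct alloc_rec *next;   /* singly linked list of live allocations */
        void *addr;               /* pointer returned to the caller */
        size_t size;              /* requested size */
        const char *func;         /* allocating function */
        int line;                 /* allocating line */
};

static struct alloc_rec *live_list = NULL;

static void *tracked_alloc(size_t size, const char *func, int line)
{
        struct alloc_rec *rec = malloc(sizeof(*rec));
        void *ptr = malloc(size);

        if (rec == NULL || ptr == NULL) {
                free(rec);
                free(ptr);
                return NULL;
        }

        /* Remember who allocated what so leaks can be attributed later. */
        rec->addr = ptr;
        rec->size = size;
        rec->func = func;
        rec->line = line;
        rec->next = live_list;
        live_list = rec;
        return ptr;
}

static void tracked_free(void *ptr)
{
        struct alloc_rec **pp = &live_list;

        /* Drop the matching record, then release the user buffer. */
        for (; *pp != NULL; pp = &(*pp)->next) {
                if ((*pp)->addr == ptr) {
                        struct alloc_rec *rec = *pp;
                        *pp = rec->next;
                        free(rec);
                        break;
                }
        }
        free(ptr);
}

/* At "module unload" time, anything still on the list is a leak. */
static void dump_leaks(void)
{
        struct alloc_rec *rec;

        for (rec = live_list; rec != NULL; rec = rec->next)
                printf("%p %zu %s:%d\n", rec->addr, rec->size,
                       rec->func, rec->line);
}

#define TRACKED_ALLOC(size) tracked_alloc((size), __FUNCTION__, __LINE__)

int main(void)
{
        void *a = TRACKED_ALLOC(32);
        void *b = TRACKED_ALLOC(13);

        tracked_free(a);
        dump_leaks();      /* reports only 'b', which was never freed */
        tracked_free(b);
        return 0;
}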

- Shift to using rwsems in the kmem implementation, to simplify locking
  and improve concurrency.

- Shift to using rwsems in the mutex implementation; additionally, ensure
  we never sleep in the init function when the preempt_count is non-zero
  or interrupts are disabled, as can happen in a slab cache ctor/dtor
  (the pattern is condensed in the snippet below).
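
For reference, the non-sleeping init pattern boils down to the following
(condensed from the __spl_mutex_init() hunks below; not a complete
function):

        int flags = KM_SLEEP;

        /* Detect atomic context (slab ctor/dtor, irqs off) and fall back
         * to a non-sleeping allocation flag for everything that follows. */
        if (current_thread_info()->preempt_count || irqs_disabled())
                flags = KM_NOSLEEP;

        mp->km_sem = kmem_alloc(sizeof(struct semaphore), flags);
        if (mp->km_sem == NULL)
                return;

        /* Likewise, never block on the stats rwsem in this path. */
        while (!down_write_trylock(&mutex_stats_sem));
        list_add_tail(&mp->km_list, &mutex_stats_list);
        up_write(&mutex_stats_sem);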

- Other minor formatting fixes and such.

TODO:

- Finish the vmem memory allocation tracking

- Vet all other SPL primitives for potential sleeping during *_init.  I
  suspect the rwlock implementation does this and should be fixed just
  like the mutex implementation.



git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@95 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c
behlendo 2008-05-06 20:38:28 +00:00
parent 9ab1ac14ad
commit d6a26c6a32
6 changed files with 302 additions and 78 deletions


@@ -13,6 +13,9 @@ extern "C" {
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
 #include <linux/spinlock.h>
+#include <linux/rwsem.h>
+#include <linux/hash.h>
+#include <linux/ctype.h>
 #include <sys/debug.h>
 /*
  * Memory allocation interfaces
@@ -31,33 +34,97 @@ extern atomic64_t vmem_alloc_used;
 extern unsigned long vmem_alloc_max;
 extern int kmem_warning_flag;
+
+#define KMEM_HASH_BITS          10
+#define KMEM_TABLE_SIZE         (1 << KMEM_HASH_BITS)
+
+extern struct hlist_head kmem_table[KMEM_TABLE_SIZE];
+extern struct list_head kmem_list;
+extern spinlock_t kmem_lock;
+
+typedef struct kmem_debug {
+        struct hlist_node kd_hlist;     /* Hash node linkage */
+        struct list_head kd_list;       /* List of all allocations */
+        void *kd_addr;                  /* Allocation pointer */
+        size_t kd_size;                 /* Allocation size */
+        const char *kd_func;            /* Allocation function */
+        int kd_line;                    /* Allocation line */
+} kmem_debug_t;
+
+static __inline__ kmem_debug_t *
+__kmem_del_init(void *addr)
+{
+        struct hlist_head *head;
+        struct hlist_node *node;
+        struct kmem_debug *p;
+        unsigned long flags;
+
+        spin_lock_irqsave(&kmem_lock, flags);
+        head = &kmem_table[hash_ptr(addr, KMEM_HASH_BITS)];
+        hlist_for_each_entry_rcu(p, node, head, kd_hlist) {
+                if (p->kd_addr == addr) {
+                        hlist_del_init(&p->kd_hlist);
+                        list_del_init(&p->kd_list);
+                        spin_unlock_irqrestore(&kmem_lock, flags);
+                        return p;
+                }
+        }
+        spin_unlock_irqrestore(&kmem_lock, flags);
+
+        return NULL;
+}
+
 #define __kmem_alloc(size, flags, allocator)                            \
-({      void *_ptr_;                                                    \
-                                                                        \
-        /* Marked unlikely because we should never be doing this */     \
-        if (unlikely((size) > (PAGE_SIZE * 4)) && kmem_warning_flag)    \
-                __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning large "      \
-                               "kmem_alloc(%d, 0x%x) (%ld/%ld)\n",      \
-                               (int)(size), (int)(flags),               \
-                               atomic64_read(&kmem_alloc_used),         \
-                               kmem_alloc_max);                         \
-                                                                        \
-        _ptr_ = (void *)allocator((size), (flags));                     \
-        if (_ptr_ == NULL) {                                            \
-                __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning "            \
-                               "kmem_alloc(%d, 0x%x) failed (%ld/%ld)\n", \
-                               (int)(size), (int)(flags),               \
-                               atomic64_read(&kmem_alloc_used),         \
-                               kmem_alloc_max);                         \
-        } else {                                                        \
-                atomic64_add((size), &kmem_alloc_used);                 \
-                if (unlikely(atomic64_read(&kmem_alloc_used)>kmem_alloc_max)) \
-                        kmem_alloc_max = atomic64_read(&kmem_alloc_used); \
-                                                                        \
-                __CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_alloc(%d, 0x%x) = %p " \
-                               "(%ld/%ld)\n", (int)(size), (int)(flags), \
-                               _ptr_, atomic64_read(&kmem_alloc_used),  \
-                               kmem_alloc_max);                         \
+({      void *_ptr_ = NULL;                                             \
+        kmem_debug_t *_dptr_;                                           \
+        unsigned long _flags_;                                          \
+                                                                        \
+        _dptr_ = (kmem_debug_t *)kmalloc(sizeof(kmem_debug_t), (flags)); \
+        if (_dptr_ == NULL) {                                           \
+                __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning "            \
+                               "kmem_alloc(%d, 0x%x) debug failed\n",   \
+                               sizeof(kmem_debug_t), (int)(flags));     \
+        } else {                                                        \
+                /* Marked unlikely because we should never be doing this */ \
+                if (unlikely((size) > (PAGE_SIZE * 4)) && kmem_warning_flag) \
+                        __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning large " \
+                                       "kmem_alloc(%d, 0x%x) (%ld/%ld)\n", \
+                                       (int)(size), (int)(flags),       \
+                                       atomic64_read(&kmem_alloc_used), \
+                                       kmem_alloc_max);                 \
+                                                                        \
+                _ptr_ = (void *)allocator((size), (flags));             \
+                if (_ptr_ == NULL) {                                    \
+                        kfree(_dptr_);                                  \
+                        __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning "    \
+                                       "kmem_alloc(%d, 0x%x) failed (%ld/" \
+                                       "%ld)\n", (int)(size), (int)(flags), \
+                                       atomic64_read(&kmem_alloc_used), \
+                                       kmem_alloc_max);                 \
+                } else {                                                \
+                        atomic64_add((size), &kmem_alloc_used);         \
+                        if (unlikely(atomic64_read(&kmem_alloc_used) >  \
+                            kmem_alloc_max))                            \
+                                kmem_alloc_max =                        \
+                                        atomic64_read(&kmem_alloc_used); \
+                                                                        \
+                        INIT_HLIST_NODE(&_dptr_->kd_hlist);             \
+                        INIT_LIST_HEAD(&_dptr_->kd_list);               \
+                        _dptr_->kd_addr = _ptr_;                        \
+                        _dptr_->kd_size = (size);                       \
+                        _dptr_->kd_func = __FUNCTION__;                 \
+                        _dptr_->kd_line = __LINE__;                     \
+                        spin_lock_irqsave(&kmem_lock, _flags_);         \
+                        hlist_add_head_rcu(&_dptr_->kd_hlist,           \
+                            &kmem_table[hash_ptr(_ptr_, KMEM_HASH_BITS)]); \
+                        list_add_tail(&_dptr_->kd_list, &kmem_list);    \
+                        spin_unlock_irqrestore(&kmem_lock, _flags_);    \
+                                                                        \
+                        __CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_alloc("    \
+                                       "%d, 0x%x) = %p (%ld/%ld)\n",    \
+                                       (int)(size), (int)(flags), _ptr_, \
+                                       atomic64_read(&kmem_alloc_used), \
+                                       kmem_alloc_max);                 \
+                }                                                       \
         }                                                               \
                                                                         \
         _ptr_;                                                          \
@@ -68,12 +135,23 @@ extern int kmem_warning_flag;
 #define kmem_free(ptr, size)                                            \
 ({                                                                      \
+        kmem_debug_t *_dptr_;                                           \
         ASSERT((ptr) || (size > 0));                                    \
+                                                                        \
+        _dptr_ = __kmem_del_init(ptr);                                  \
+        ASSERT(_dptr_); /* Must exist in hash due to kmem_alloc() */    \
+        ASSERTF(_dptr_->kd_size == (size), "kd_size (%d) != size (%d), " \
+                "kd_func = %s, kd_line = %d\n", _dptr_->kd_size, (size), \
+                _dptr_->kd_func, _dptr_->kd_line); /* Size must match */ \
         atomic64_sub((size), &kmem_alloc_used);                         \
         __CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_free(%p, %d) (%ld/%ld)\n", \
                        (ptr), (int)(size), atomic64_read(&kmem_alloc_used), \
                        kmem_alloc_max);                                 \
-        memset(ptr, 0x5a, (size)); /* Poison */                         \
+                                                                        \
+        memset(_dptr_, 0x5a, sizeof(kmem_debug_t));                     \
+        kfree(_dptr_);                                                  \
+                                                                        \
+        memset(ptr, 0x5a, (size));                                      \
         kfree(ptr);                                                     \
 })


@@ -17,12 +17,20 @@ atomic64_t vmem_alloc_used;
 unsigned long vmem_alloc_max = 0;
 int kmem_warning_flag = 1;
+
+spinlock_t kmem_lock;
+struct hlist_head kmem_table[KMEM_TABLE_SIZE];
+struct list_head kmem_list;
 
 EXPORT_SYMBOL(kmem_alloc_used);
 EXPORT_SYMBOL(kmem_alloc_max);
 EXPORT_SYMBOL(vmem_alloc_used);
 EXPORT_SYMBOL(vmem_alloc_max);
 EXPORT_SYMBOL(kmem_warning_flag);
+EXPORT_SYMBOL(kmem_lock);
+EXPORT_SYMBOL(kmem_table);
+EXPORT_SYMBOL(kmem_list);
 
 int kmem_set_warning(int flag) { return (kmem_warning_flag = !!flag); }
 #else
 int kmem_set_warning(int flag) { return 0; }
@@ -44,7 +52,11 @@ EXPORT_SYMBOL(kmem_set_warning);
  * solaris style callback is needed.  There is some overhead in this
  * operation which isn't horibile but it needs to be kept in mind.
  */
+#define KCC_MAGIC               0x7a7a7a7a
+#define KCC_POISON              0x77
+
 typedef struct kmem_cache_cb {
+        int                     kcc_magic;
         struct list_head        kcc_list;
         kmem_cache_t *          kcc_cache;
         kmem_constructor_t      kcc_constructor;
@@ -52,14 +64,14 @@ typedef struct kmem_cache_cb {
         kmem_reclaim_t          kcc_reclaim;
         void *                  kcc_private;
         void *                  kcc_vmp;
+        atomic_t                kcc_ref;
 } kmem_cache_cb_t;
 
-static spinlock_t kmem_cache_cb_lock = SPIN_LOCK_UNLOCKED;
-static LIST_HEAD(kmem_cache_cb_list);
+static struct rw_semaphore kmem_cache_cb_sem;
+static struct list_head kmem_cache_cb_list;
 static struct shrinker *kmem_cache_shrinker;
 
-/* Function must be called while holding the kmem_cache_cb_lock
+/* Function must be called while holding the kmem_cache_cb_sem
  * Because kmem_cache_t is an opaque datatype we're forced to
  * match pointers to identify specific cache entires.
  */
@@ -67,6 +79,9 @@ static kmem_cache_cb_t *
 kmem_cache_find_cache_cb(kmem_cache_t *cache)
 {
         kmem_cache_cb_t *kcc;
+#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
+        ASSERT(rwsem_is_locked(&kmem_cache_cb_sem));
+#endif
 
         list_for_each_entry(kcc, &kmem_cache_cb_list, kcc_list)
                 if (cache == kcc->kcc_cache)
@@ -83,19 +98,20 @@ kmem_cache_add_cache_cb(kmem_cache_t *cache,
                         void *priv, void *vmp)
 {
         kmem_cache_cb_t *kcc;
-        unsigned long flags;
 
         kcc = (kmem_cache_cb_t *)kmalloc(sizeof(*kcc), GFP_KERNEL);
         if (kcc) {
+                kcc->kcc_magic = KCC_MAGIC;
                 kcc->kcc_cache = cache;
                 kcc->kcc_constructor = constructor;
                 kcc->kcc_destructor = destructor;
                 kcc->kcc_reclaim = reclaim;
                 kcc->kcc_private = priv;
                 kcc->kcc_vmp = vmp;
-                spin_lock_irqsave(&kmem_cache_cb_lock, flags);
+                atomic_set(&kcc->kcc_ref, 0);
+                down_write(&kmem_cache_cb_sem);
                 list_add(&kcc->kcc_list, &kmem_cache_cb_list);
-                spin_unlock_irqrestore(&kmem_cache_cb_lock, flags);
+                up_write(&kmem_cache_cb_sem);
         }
 
         return kcc;
@@ -104,14 +120,15 @@ kmem_cache_add_cache_cb(kmem_cache_t *cache,
 static void
 kmem_cache_remove_cache_cb(kmem_cache_cb_t *kcc)
 {
-        unsigned long flags;
-
-        spin_lock_irqsave(&kmem_cache_cb_lock, flags);
+        down_write(&kmem_cache_cb_sem);
+        ASSERT(atomic_read(&kcc->kcc_ref) == 0);
         list_del(&kcc->kcc_list);
-        spin_unlock_irqrestore(&kmem_cache_cb_lock, flags);
+        up_write(&kmem_cache_cb_sem);
 
-        if (kcc)
+        if (kcc){
+                memset(kcc, KCC_POISON, sizeof(*kcc));
                 kfree(kcc);
+        }
 }
 
 static void
@@ -119,23 +136,36 @@ kmem_cache_generic_constructor(void *ptr, kmem_cache_t *cache, unsigned long fla
 {
         kmem_cache_cb_t *kcc;
         kmem_constructor_t constructor;
-        unsigned long irqflags;
         void *private;
 
-        spin_lock_irqsave(&kmem_cache_cb_lock, irqflags);
+        /* Ensure constructor verifies are not passed to the registered
+         * constructors.  This may not be safe due to the Solaris constructor
+         * not being aware of how to handle the SLAB_CTOR_VERIFY flag
+         */
+        if (flags & SLAB_CTOR_VERIFY)
+                return;
+
+        /* We can be called with interrupts disabled so it is critical that
+         * this function and the registered constructor never sleep.
+         */
+        while (!down_read_trylock(&kmem_cache_cb_sem));
 
         /* Callback list must be in sync with linux slab caches */
         kcc = kmem_cache_find_cache_cb(cache);
         ASSERT(kcc);
+        ASSERT(kcc->kcc_magic == KCC_MAGIC);
+        atomic_inc(&kcc->kcc_ref);
         constructor = kcc->kcc_constructor;
         private = kcc->kcc_private;
-        spin_unlock_irqrestore(&kmem_cache_cb_lock, irqflags);
+        up_read(&kmem_cache_cb_sem);
 
         if (constructor)
                 constructor(ptr, private, (int)flags);
 
+        atomic_dec(&kcc->kcc_ref);
+
         /* Linux constructor has no return code, silently eat it */
 }
@@ -144,23 +174,29 @@ kmem_cache_generic_destructor(void *ptr, kmem_cache_t *cache, unsigned long flag
 {
         kmem_cache_cb_t *kcc;
         kmem_destructor_t destructor;
-        unsigned long irqflags;
         void *private;
 
-        spin_lock_irqsave(&kmem_cache_cb_lock, irqflags);
+        /* We can be called with interrupts disabled so it is critical that
+         * this function and the registered constructor never sleep.
+         */
+        while (!down_read_trylock(&kmem_cache_cb_sem));
 
         /* Callback list must be in sync with linux slab caches */
         kcc = kmem_cache_find_cache_cb(cache);
         ASSERT(kcc);
+        ASSERT(kcc->kcc_magic == KCC_MAGIC);
+        atomic_inc(&kcc->kcc_ref);
         destructor = kcc->kcc_destructor;
         private = kcc->kcc_private;
-        spin_unlock_irqrestore(&kmem_cache_cb_lock, irqflags);
+        up_read(&kmem_cache_cb_sem);
 
         /* Solaris destructor takes no flags, silently eat them */
         if (destructor)
                 destructor(ptr, private);
+
+        atomic_dec(&kcc->kcc_ref);
 }
 
 /* XXX - Arguments are ignored */
@@ -168,7 +204,6 @@ static int
 kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
 {
         kmem_cache_cb_t *kcc;
-        unsigned long flags;
         int total = 0;
 
         /* Under linux a shrinker is not tightly coupled with a slab
@@ -178,9 +213,23 @@ kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
          * function in the shim layer for all slab caches.  And we always
          * attempt to shrink all caches when this generic shrinker is called.
          */
-        spin_lock_irqsave(&kmem_cache_cb_lock, flags);
+        down_read(&kmem_cache_cb_sem);
         list_for_each_entry(kcc, &kmem_cache_cb_list, kcc_list) {
+                ASSERT(kcc);
+                ASSERT(kcc->kcc_magic == KCC_MAGIC);
+
+                /* Take a reference on the cache in question.  If that
+                 * cache is contended simply skip it, it may already be
+                 * in the process of a reclaim or the ctor/dtor may be
+                 * running in either case it's best to skip it.
+                 */
+                atomic_inc(&kcc->kcc_ref);
+                if (atomic_read(&kcc->kcc_ref) > 1) {
+                        atomic_dec(&kcc->kcc_ref);
+                        continue;
+                }
+
                 /* Under linux the desired number and gfp type of objects
                  * is passed to the reclaiming function as a sugested reclaim
                  * target.  I do not pass these args on because reclaim
@@ -190,6 +239,7 @@ kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
                 if (kcc->kcc_reclaim)
                         kcc->kcc_reclaim(kcc->kcc_private);
 
+                atomic_dec(&kcc->kcc_ref);
                 total += 1;
         }
@@ -199,7 +249,8 @@ kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
          * was registered with the generic shrinker.  This should fake out
          * the linux VM when it attempts to shrink caches.
          */
-        spin_unlock_irqrestore(&kmem_cache_cb_lock, flags);
+        up_read(&kmem_cache_cb_sem);
+
         return total;
 }
@@ -238,18 +289,18 @@ __kmem_cache_create(char *name, size_t size, size_t align,
                 RETURN(NULL);
 
         /* Register shared shrinker function on initial cache create */
-        spin_lock(&kmem_cache_cb_lock);
+        down_read(&kmem_cache_cb_sem);
         if (list_empty(&kmem_cache_cb_list)) {
                 kmem_cache_shrinker = set_shrinker(KMC_DEFAULT_SEEKS,
                                                    kmem_cache_generic_shrinker);
                 if (kmem_cache_shrinker == NULL) {
                         kmem_cache_destroy(cache);
-                        spin_unlock(&kmem_cache_cb_lock);
+                        up_read(&kmem_cache_cb_sem);
                         RETURN(NULL);
                 }
         }
-        spin_unlock(&kmem_cache_cb_lock);
+        up_read(&kmem_cache_cb_sem);
 
         kcc = kmem_cache_add_cache_cb(cache, constructor, destructor,
                                       reclaim, priv, vmp);
@@ -272,27 +323,31 @@ __kmem_cache_destroy(kmem_cache_t *cache)
 {
         kmem_cache_cb_t *kcc;
         char *name;
-        unsigned long flags;
         int rc;
         ENTRY;
 
-        spin_lock_irqsave(&kmem_cache_cb_lock, flags);
+        down_read(&kmem_cache_cb_sem);
         kcc = kmem_cache_find_cache_cb(cache);
-        spin_unlock_irqrestore(&kmem_cache_cb_lock, flags);
-        if (kcc == NULL)
+        if (kcc == NULL) {
+                up_read(&kmem_cache_cb_sem);
                 RETURN(-EINVAL);
+        }
+        atomic_inc(&kcc->kcc_ref);
+        up_read(&kmem_cache_cb_sem);
 
         name = (char *)kmem_cache_name(cache);
         rc = kmem_cache_destroy(cache);
+
+        atomic_dec(&kcc->kcc_ref);
         kmem_cache_remove_cache_cb(kcc);
         kfree(name);
 
         /* Unregister generic shrinker on removal of all caches */
-        spin_lock_irqsave(&kmem_cache_cb_lock, flags);
+        down_read(&kmem_cache_cb_sem);
         if (list_empty(&kmem_cache_cb_list))
                 remove_shrinker(kmem_cache_shrinker);
-        spin_unlock_irqrestore(&kmem_cache_cb_lock, flags);
+        up_read(&kmem_cache_cb_sem);
 
         RETURN(rc);
 }
 EXPORT_SYMBOL(__kmem_cache_destroy);
@@ -312,25 +367,101 @@ int
 kmem_init(void)
 {
         ENTRY;
+
+        init_rwsem(&kmem_cache_cb_sem);
+        INIT_LIST_HEAD(&kmem_cache_cb_list);
 #ifdef DEBUG_KMEM
-        atomic64_set(&kmem_alloc_used, 0);
-        atomic64_set(&vmem_alloc_used, 0);
+        {
+                int i;
+                atomic64_set(&kmem_alloc_used, 0);
+                atomic64_set(&vmem_alloc_used, 0);
+
+                spin_lock_init(&kmem_lock);
+                INIT_LIST_HEAD(&kmem_list);
+
+                for (i = 0; i < KMEM_TABLE_SIZE; i++)
+                        INIT_HLIST_HEAD(&kmem_table[i]);
+        }
 #endif
         RETURN(0);
 }
 
+static char *sprintf_addr(kmem_debug_t *kd, char *str, int len, int min)
+{
+        int size = ((len - 1) < kd->kd_size) ? (len - 1) : kd->kd_size;
+        int i, flag = 1;
+
+        ASSERT(str != NULL && len >= 17);
+        memset(str, 0, len);
+
+        /* Check for a fully printable string, and while we are at
+         * it place the printable characters in the passed buffer. */
+        for (i = 0; i < size; i++) {
+                str[i] = ((char *)(kd->kd_addr))[i];
+                if (isprint(str[i])) {
+                        continue;
+                } else {
+                        /* Minimum number of printable characters found
+                         * to make it worthwhile to print this as ascii. */
+                        if (i > min)
+                                break;
+
+                        flag = 0;
+                        break;
+                }
+        }
+
+        if (!flag) {
+                sprintf(str, "%02x%02x%02x%02x%02x%02x%02x%02x",
+                        *((uint8_t *)kd->kd_addr),
+                        *((uint8_t *)kd->kd_addr + 2),
+                        *((uint8_t *)kd->kd_addr + 4),
+                        *((uint8_t *)kd->kd_addr + 6),
+                        *((uint8_t *)kd->kd_addr + 8),
+                        *((uint8_t *)kd->kd_addr + 10),
+                        *((uint8_t *)kd->kd_addr + 12),
+                        *((uint8_t *)kd->kd_addr + 14));
+        }
+
+        return str;
+}
+
 void
 kmem_fini(void)
 {
         ENTRY;
 #ifdef DEBUG_KMEM
-        if (atomic64_read(&kmem_alloc_used) != 0)
-                CWARN("kmem leaked %ld/%ld bytes\n",
-                      atomic_read(&kmem_alloc_used), kmem_alloc_max);
-
-        if (atomic64_read(&vmem_alloc_used) != 0)
-                CWARN("vmem leaked %ld/%ld bytes\n",
-                      atomic_read(&vmem_alloc_used), vmem_alloc_max);
+        {
+                unsigned long flags;
+                kmem_debug_t *kd;
+                char str[17];
+
+                if (atomic64_read(&kmem_alloc_used) != 0)
+                        CWARN("kmem leaked %ld/%ld bytes\n",
+                              atomic_read(&kmem_alloc_used), kmem_alloc_max);
+
+                /* Display all unreclaimed memory addresses, including the
+                 * allocation size and the first few bytes of what's located
+                 * at that address to aid in debugging.  Performance is not
+                 * a serious concern here since it is module unload time. */
+                spin_lock_irqsave(&kmem_lock, flags);
+                if (!list_empty(&kmem_list))
+                        CDEBUG(D_WARNING, "%-16s %-5s %-16s %s:%s\n",
+                               "address", "size", "data", "func", "line");
+
+                list_for_each_entry(kd, &kmem_list, kd_list) {
+                        CDEBUG(D_WARNING, "%p %-5d %-16s %s:%d\n",
+                               kd->kd_addr, kd->kd_size,
+                               sprintf_addr(kd, str, 17, 8),
+                               kd->kd_func, kd->kd_line);
+                }
+                spin_unlock_irqrestore(&kmem_lock, flags);
+
+                if (atomic64_read(&vmem_alloc_used) != 0)
+                        CWARN("vmem leaked %ld/%ld bytes\n",
+                              atomic_read(&vmem_alloc_used), vmem_alloc_max);
+        }
 #endif
         EXIT;
 }


@@ -29,13 +29,15 @@ int mutex_spin_max = 100;
 #ifdef DEBUG_MUTEX
 int mutex_stats[MUTEX_STATS_SIZE] = { 0 };
-DEFINE_MUTEX(mutex_stats_lock);
+struct rw_semaphore mutex_stats_sem;
 LIST_HEAD(mutex_stats_list);
 #endif
 
 void
 __spl_mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
 {
+        int flags = KM_SLEEP;
+
         ASSERT(mp);
         ASSERT(name);
         ASSERT(ibc == NULL);
@@ -58,12 +60,18 @@ __spl_mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
                 SBUG();
         }
 
+        /* We may be called when there is a non-zero preempt_count or
+         * interrupts are disabled is which case we must not sleep.
+         */
+        if (current_thread_info()->preempt_count || irqs_disabled())
+                flags = KM_NOSLEEP;
+
         /* Semaphore kmem_alloc'ed to keep struct size down (<64b) */
-        mp->km_sem = kmem_alloc(sizeof(struct semaphore), KM_SLEEP);
+        mp->km_sem = kmem_alloc(sizeof(struct semaphore), flags);
         if (mp->km_sem == NULL)
                 return;
 
-        mp->km_name = kmem_alloc(mp->km_name_size, KM_SLEEP);
+        mp->km_name = kmem_alloc(mp->km_name_size, flags);
         if (mp->km_name == NULL) {
                 kmem_free(mp->km_sem, sizeof(struct semaphore));
                 return;
@@ -73,16 +81,19 @@ __spl_mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
         strcpy(mp->km_name, name);
 
 #ifdef DEBUG_MUTEX
-        mp->km_stats = kmem_zalloc(sizeof(int) * MUTEX_STATS_SIZE, KM_SLEEP);
+        mp->km_stats = kmem_zalloc(sizeof(int) * MUTEX_STATS_SIZE, flags);
         if (mp->km_stats == NULL) {
                 kmem_free(mp->km_name, mp->km_name_size);
                 kmem_free(mp->km_sem, sizeof(struct semaphore));
                 return;
         }
 
-        mutex_lock(&mutex_stats_lock);
+        /* We may be called when there is a non-zero preempt_count or
+         * interrupts are disabled is which case we must not sleep.
+         */
+        while (!down_write_trylock(&mutex_stats_sem));
         list_add_tail(&mp->km_list, &mutex_stats_list);
-        mutex_unlock(&mutex_stats_lock);
+        up_write(&mutex_stats_sem);
 #endif
 }
 EXPORT_SYMBOL(__spl_mutex_init);
@@ -94,9 +105,12 @@ __spl_mutex_destroy(kmutex_t *mp)
         ASSERT(mp->km_magic == KM_MAGIC);
 
 #ifdef DEBUG_MUTEX
-        mutex_lock(&mutex_stats_lock);
+        /* We may be called when there is a non-zero preempt_count or
+         * interrupts are disabled is which case we must not sleep.
+         */
+        while (!down_write_trylock(&mutex_stats_sem));
         list_del_init(&mp->km_list);
-        mutex_unlock(&mutex_stats_lock);
+        up_write(&mutex_stats_sem);
 
         kmem_free(mp->km_stats, sizeof(int) * MUTEX_STATS_SIZE);
 #endif


@@ -426,7 +426,7 @@ mutex_seq_start(struct seq_file *f, loff_t *pos)
         loff_t n = *pos;
         ENTRY;
 
-        mutex_lock(&mutex_stats_lock);
+        down_read(&mutex_stats_sem);
         if (!n)
                 mutex_seq_show_headers(f);
@@ -454,7 +454,7 @@ mutex_seq_next(struct seq_file *f, void *p, loff_t *pos)
 static void
 mutex_seq_stop(struct seq_file *f, void *v)
 {
-        mutex_unlock(&mutex_stats_lock);
+        up_read(&mutex_stats_sem);
 }
 
 static struct seq_operations mutex_seq_ops = {


@@ -138,7 +138,7 @@ typedef struct splat_info {
 #define sym2str(sym)                    (char *)(#sym)
 
 #define splat_print(file, format, args...)                              \
 ({      splat_info_t *_info_ = (splat_info_t *)file->private_data;     \
-        int _rc_;                                                       \
+        int _rc_;                                                       \
                                                                         \
         ASSERT(_info_);                                                 \
@@ -160,7 +160,7 @@ typedef struct splat_info {
         _rc_;                                                           \
 })
 
-#define splat_vprint(file, test, format, args...)                       \
+#define splat_vprint(file, test, format, args...)                       \
         splat_print(file, "%*s: " format, SPLAT_NAME_SIZE, test, args)
 
 splat_subsystem_t * splat_condvar_init(void);


@@ -148,7 +148,7 @@ splat_kmem_test34_constructor(void *ptr, void *priv, int flags)
                 kcd->kcd_flag = 1;
 
                 if (kcp) {
                         kcd->kcd_magic = kcp->kcp_magic;
-                        kcp->kcp_count++;
+                        kcp->kcp_count++;
                 }
         }
@@ -258,8 +258,8 @@ splat_kmem_test4_reclaim(void *priv)
         int i;
 
         splat_vprint(kcp->kcp_file, SPLAT_KMEM_TEST4_NAME,
-                     "Reaping %d objects from '%s'\n",
-                     SPLAT_KMEM_OBJ_RECLAIM, SPLAT_KMEM_CACHE_NAME);
+                     "Reaping %d objects from '%s'\n",
+                     SPLAT_KMEM_OBJ_RECLAIM, SPLAT_KMEM_CACHE_NAME);
         for (i = 0; i < SPLAT_KMEM_OBJ_RECLAIM; i++) {
                 if (kcp->kcp_kcd[i]) {
                         kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
@@ -306,6 +306,7 @@ splat_kmem_test4(struct file *file, void *arg)
         }
 
         max = kcp.kcp_count;
+        ASSERT(max > 0);
 
         /* Force shrinker to run */
         kmem_reap();