Lots of fixes here:

- Detailed kmem memory allocation tracking. On SPL module unload we can
  now get a list of all memory allocations which were not freed, along
  with where the original allocation was made. E.g.

    SPL: 15554:632:(spl-kmem.c:442:kmem_fini()) kmem leaked 90/319332 bytes
    SPL: 15554:648:(spl-kmem.c:451:kmem_fini()) address          size  data             func:line
    SPL: 15554:648:(spl-kmem.c:457:kmem_fini()) ffff8100734b68b8 32    0100000001005a5a __spl_mutex_init:70
    SPL: 15554:648:(spl-kmem.c:457:kmem_fini()) ffff8100734b6148 13    &tl->tl_lock     __spl_mutex_init:74
    SPL: 15554:648:(spl-kmem.c:457:kmem_fini()) ffff81007ac43730 32    0100000001005a5a __spl_mutex_init:70
    SPL: 15554:648:(spl-kmem.c:457:kmem_fini()) ffff81007ac437d8 13    &tl->tl_lock     __spl_mutex_init:74

- Shift to using rwsems in the kmem implementation, to simplify locking
  and improve concurrency.

- Shift to using rwsems in the mutex implementation; additionally, ensure
  we never sleep in the init function if there is a non-zero preempt_count
  or interrupts are disabled, as can happen in a slab cache ctor/dtor.

- Other minor formatting fixes and such.

TODO:

- Finish the vmem memory allocation tracking.

- Vet all other SPL primitives for potential sleeping during *_init. I
  suspect the rwlock implementation does this and should be fixed just
  like the mutex implementation.

git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@95 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c
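To make the bookkeeping easier to follow before reading the diff, here is a
minimal user-space sketch of the same idea: one debug record per live
allocation, hashed on the returned pointer, with a dump of whatever is left
at teardown. All names here (track_alloc, TABLE_SIZE, etc.) are illustrative
and not the SPL API; the real implementation additionally serializes every
table operation with kmem_lock under spin_lock_irqsave().

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define TABLE_SIZE 1024                 /* like 1 << KMEM_HASH_BITS */

/* One record per live allocation, hashed on the returned pointer so
 * the free path can find and unlink it without scanning everything. */
struct kd {
        struct kd  *next;
        void       *addr;
        size_t      size;
        const char *func;
        int         line;
};

static struct kd *table[TABLE_SIZE];

static unsigned int hash_ptr(const void *p)
{
        return ((uintptr_t)p >> 4) & (TABLE_SIZE - 1);
}

static void *track_alloc(size_t size, const char *func, int line)
{
        void *ptr = malloc(size);
        struct kd *kd;

        if (ptr == NULL || (kd = malloc(sizeof(*kd))) == NULL)
                return ptr;             /* allocation goes untracked on OOM */

        kd->addr = ptr;
        kd->size = size;
        kd->func = func;                /* where the caller allocated */
        kd->line = line;
        kd->next = table[hash_ptr(ptr)];
        table[hash_ptr(ptr)] = kd;
        return ptr;
}

static void track_free(void *ptr)
{
        struct kd **pp;

        for (pp = &table[hash_ptr(ptr)]; *pp != NULL; pp = &(*pp)->next) {
                if ((*pp)->addr == ptr) {
                        struct kd *kd = *pp;
                        *pp = kd->next; /* unlink the debug record */
                        free(kd);
                        break;
                }
        }
        free(ptr);
}

/* The module-unload report: anything still in the table leaked. */
static void track_dump(void)
{
        unsigned int i;
        struct kd *kd;

        for (i = 0; i < TABLE_SIZE; i++)
                for (kd = table[i]; kd != NULL; kd = kd->next)
                        printf("%p %zu %s:%d\n",
                               kd->addr, kd->size, kd->func, kd->line);
}

#define TRACK_ALLOC(size) track_alloc((size), __func__, __LINE__)

int main(void)
{
        void *a = TRACK_ALLOC(32);
        void *b = TRACK_ALLOC(13);

        track_free(a);
        track_dump();                   /* reports only b: "... 13 main:NN" */
        track_free(b);
        return 0;
}

The kernel version in this change has the same shape: __kmem_alloc() records
__FUNCTION__/__LINE__ in a kmem_debug_t and hashes it into kmem_table,
__kmem_del_init() unlinks it on kmem_free(), and kmem_fini() walks kmem_list
to print whatever remains.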
parent 9ab1ac14ad
commit d6a26c6a32
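One subtle point in this change: the slab ctor/dtor paths and the mutex
init/destroy paths may run with a non-zero preempt_count or with interrupts
disabled, so they must not block on the new rwsems; instead they spin on
*_trylock until the semaphore is acquired. A rough user-space analogue with
POSIX rwlocks, purely illustrative (the kernel code spins with an empty loop
body on down_read_trylock()/down_write_trylock()):

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

/* Writers (rare: cache create/destroy) block normally; the hot path
 * that must not sleep busy-waits on trylock instead of blocking. */
static pthread_rwlock_t cb_lock = PTHREAD_RWLOCK_INITIALIZER;
static int cb_count;

/* Analogue of the ctor/dtor path: may not block, so spin. */
static int lookup_nosleep(void)
{
        int n;

        while (pthread_rwlock_tryrdlock(&cb_lock) != 0)
                sched_yield();          /* the kernel version just spins */
        n = cb_count;
        pthread_rwlock_unlock(&cb_lock);
        return n;
}

/* Analogue of registration from process context: sleeping is fine. */
static void register_cb(void)
{
        pthread_rwlock_wrlock(&cb_lock);
        cb_count++;
        pthread_rwlock_unlock(&cb_lock);
}

int main(void)
{
        register_cb();
        printf("callbacks: %d\n", lookup_nosleep());
        return 0;
}

The same reasoning drives the KM_NOSLEEP fallback in __spl_mutex_init():
if preempt_count is non-zero or irqs_disabled(), its allocations are made
non-blocking rather than risking a sleep in atomic context.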
@@ -13,6 +13,9 @@ extern "C" {
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
 #include <linux/spinlock.h>
+#include <linux/rwsem.h>
+#include <linux/hash.h>
+#include <linux/ctype.h>
 #include <sys/debug.h>
 /*
  * Memory allocation interfaces
@@ -31,9 +34,56 @@ extern atomic64_t vmem_alloc_used;
 extern unsigned long vmem_alloc_max;
 extern int kmem_warning_flag;
 
+#define KMEM_HASH_BITS          10
+#define KMEM_TABLE_SIZE         (1 << KMEM_HASH_BITS)
+
+extern struct hlist_head kmem_table[KMEM_TABLE_SIZE];
+extern struct list_head kmem_list;
+extern spinlock_t kmem_lock;
+
+typedef struct kmem_debug {
+        struct hlist_node kd_hlist;     /* Hash node linkage */
+        struct list_head kd_list;       /* List of all allocations */
+        void *kd_addr;                  /* Allocation pointer */
+        size_t kd_size;                 /* Allocation size */
+        const char *kd_func;            /* Allocation function */
+        int kd_line;                    /* Allocation line */
+} kmem_debug_t;
+
+static __inline__ kmem_debug_t *
+__kmem_del_init(void *addr)
+{
+        struct hlist_head *head;
+        struct hlist_node *node;
+        struct kmem_debug *p;
+        unsigned long flags;
+
+        spin_lock_irqsave(&kmem_lock, flags);
+        head = &kmem_table[hash_ptr(addr, KMEM_HASH_BITS)];
+        hlist_for_each_entry_rcu(p, node, head, kd_hlist) {
+                if (p->kd_addr == addr) {
+                        hlist_del_init(&p->kd_hlist);
+                        list_del_init(&p->kd_list);
+                        spin_unlock_irqrestore(&kmem_lock, flags);
+                        return p;
+                }
+        }
+
+        spin_unlock_irqrestore(&kmem_lock, flags);
+        return NULL;
+}
+
 #define __kmem_alloc(size, flags, allocator) \
-({      void *_ptr_; \
+({      void *_ptr_ = NULL; \
+        kmem_debug_t *_dptr_; \
+        unsigned long _flags_; \
         \
+        _dptr_ = (kmem_debug_t *)kmalloc(sizeof(kmem_debug_t), (flags)); \
+        if (_dptr_ == NULL) { \
+                __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning " \
+                               "kmem_alloc(%d, 0x%x) debug failed\n", \
+                               sizeof(kmem_debug_t), (int)(flags)); \
+        } else { \
         /* Marked unlikely because we should never be doing this */ \
         if (unlikely((size) > (PAGE_SIZE * 4)) && kmem_warning_flag) \
                 __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning large " \
@@ -44,20 +94,37 @@ extern int kmem_warning_flag;
         \
         _ptr_ = (void *)allocator((size), (flags)); \
         if (_ptr_ == NULL) { \
+                kfree(_dptr_); \
                 __CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning " \
-                               "kmem_alloc(%d, 0x%x) failed (%ld/%ld)\n", \
-                               (int)(size), (int)(flags), \
+                               "kmem_alloc(%d, 0x%x) failed (%ld/" \
+                               "%ld)\n", (int)(size), (int)(flags), \
                                atomic64_read(&kmem_alloc_used), \
                                kmem_alloc_max); \
         } else { \
                 atomic64_add((size), &kmem_alloc_used); \
-                if (unlikely(atomic64_read(&kmem_alloc_used)>kmem_alloc_max)) \
-                        kmem_alloc_max = atomic64_read(&kmem_alloc_used); \
+                if (unlikely(atomic64_read(&kmem_alloc_used) > \
+                             kmem_alloc_max)) \
+                        kmem_alloc_max = \
+                                atomic64_read(&kmem_alloc_used); \
                 \
-                __CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_alloc(%d, 0x%x) = %p " \
-                               "(%ld/%ld)\n", (int)(size), (int)(flags), \
-                               _ptr_, atomic64_read(&kmem_alloc_used), \
+                INIT_HLIST_NODE(&_dptr_->kd_hlist); \
+                INIT_LIST_HEAD(&_dptr_->kd_list); \
+                _dptr_->kd_addr = _ptr_; \
+                _dptr_->kd_size = (size); \
+                _dptr_->kd_func = __FUNCTION__; \
+                _dptr_->kd_line = __LINE__; \
+                spin_lock_irqsave(&kmem_lock, _flags_); \
+                hlist_add_head_rcu(&_dptr_->kd_hlist, \
+                        &kmem_table[hash_ptr(_ptr_, KMEM_HASH_BITS)]); \
+                list_add_tail(&_dptr_->kd_list, &kmem_list); \
+                spin_unlock_irqrestore(&kmem_lock, _flags_); \
+                \
+                __CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_alloc(" \
+                               "%d, 0x%x) = %p (%ld/%ld)\n", \
+                               (int)(size), (int)(flags), _ptr_, \
+                               atomic64_read(&kmem_alloc_used), \
                                kmem_alloc_max); \
         } \
+        } \
         \
         _ptr_; \
@@ -68,12 +135,23 @@ extern int kmem_warning_flag;
 
 #define kmem_free(ptr, size) \
 ({ \
+        kmem_debug_t *_dptr_; \
         ASSERT((ptr) || (size > 0)); \
         \
+        _dptr_ = __kmem_del_init(ptr); \
+        ASSERT(_dptr_); /* Must exist in hash due to kmem_alloc() */ \
+        ASSERTF(_dptr_->kd_size == (size), "kd_size (%d) != size (%d), " \
+                "kd_func = %s, kd_line = %d\n", _dptr_->kd_size, (size), \
+                _dptr_->kd_func, _dptr_->kd_line); /* Size must match */ \
         atomic64_sub((size), &kmem_alloc_used); \
         __CDEBUG_LIMIT(S_KMEM, D_INFO, "kmem_free(%p, %d) (%ld/%ld)\n", \
                        (ptr), (int)(size), atomic64_read(&kmem_alloc_used), \
                        kmem_alloc_max); \
-        memset(ptr, 0x5a, (size)); /* Poison */ \
         \
+        memset(_dptr_, 0x5a, sizeof(kmem_debug_t)); \
+        kfree(_dptr_); \
+        \
+        memset(ptr, 0x5a, (size)); \
         kfree(ptr); \
 })

@@ -17,12 +17,20 @@ atomic64_t vmem_alloc_used;
 unsigned long vmem_alloc_max = 0;
 int kmem_warning_flag = 1;
 
+spinlock_t kmem_lock;
+struct hlist_head kmem_table[KMEM_TABLE_SIZE];
+struct list_head kmem_list;
+
 EXPORT_SYMBOL(kmem_alloc_used);
 EXPORT_SYMBOL(kmem_alloc_max);
 EXPORT_SYMBOL(vmem_alloc_used);
 EXPORT_SYMBOL(vmem_alloc_max);
 EXPORT_SYMBOL(kmem_warning_flag);
 
+EXPORT_SYMBOL(kmem_lock);
+EXPORT_SYMBOL(kmem_table);
+EXPORT_SYMBOL(kmem_list);
+
 int kmem_set_warning(int flag) { return (kmem_warning_flag = !!flag); }
 #else
 int kmem_set_warning(int flag) { return 0; }
@@ -44,7 +52,11 @@ EXPORT_SYMBOL(kmem_set_warning);
  * solaris style callback is needed. There is some overhead in this
  * operation which isn't horrible but it needs to be kept in mind.
  */
+#define KCC_MAGIC               0x7a7a7a7a
+#define KCC_POISON              0x77
+
 typedef struct kmem_cache_cb {
+        int                     kcc_magic;
         struct list_head        kcc_list;
         kmem_cache_t *          kcc_cache;
         kmem_constructor_t      kcc_constructor;
@@ -52,14 +64,14 @@ typedef struct kmem_cache_cb {
         kmem_reclaim_t          kcc_reclaim;
         void *                  kcc_private;
         void *                  kcc_vmp;
+        atomic_t                kcc_ref;
 } kmem_cache_cb_t;
 
-static spinlock_t kmem_cache_cb_lock = SPIN_LOCK_UNLOCKED;
-static LIST_HEAD(kmem_cache_cb_list);
+static struct rw_semaphore kmem_cache_cb_sem;
+static struct list_head kmem_cache_cb_list;
 static struct shrinker *kmem_cache_shrinker;
 
-/* Function must be called while holding the kmem_cache_cb_lock
+/* Function must be called while holding the kmem_cache_cb_sem
  * Because kmem_cache_t is an opaque datatype we're forced to
  * match pointers to identify specific cache entries.
  */
@@ -67,6 +79,9 @@ static kmem_cache_cb_t *
 kmem_cache_find_cache_cb(kmem_cache_t *cache)
 {
         kmem_cache_cb_t *kcc;
+#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
+        ASSERT(rwsem_is_locked(&kmem_cache_cb_sem));
+#endif
 
         list_for_each_entry(kcc, &kmem_cache_cb_list, kcc_list)
                 if (cache == kcc->kcc_cache)
@@ -83,19 +98,20 @@ kmem_cache_add_cache_cb(kmem_cache_t *cache,
                         void *priv, void *vmp)
 {
         kmem_cache_cb_t *kcc;
-        unsigned long flags;
 
         kcc = (kmem_cache_cb_t *)kmalloc(sizeof(*kcc), GFP_KERNEL);
         if (kcc) {
+                kcc->kcc_magic = KCC_MAGIC;
                 kcc->kcc_cache = cache;
                 kcc->kcc_constructor = constructor;
                 kcc->kcc_destructor = destructor;
                 kcc->kcc_reclaim = reclaim;
                 kcc->kcc_private = priv;
                 kcc->kcc_vmp = vmp;
-                spin_lock_irqsave(&kmem_cache_cb_lock, flags);
+                atomic_set(&kcc->kcc_ref, 0);
+                down_write(&kmem_cache_cb_sem);
                 list_add(&kcc->kcc_list, &kmem_cache_cb_list);
-                spin_unlock_irqrestore(&kmem_cache_cb_lock, flags);
+                up_write(&kmem_cache_cb_sem);
         }
 
         return kcc;
@@ -104,38 +120,52 @@ kmem_cache_add_cache_cb(kmem_cache_t *cache,
 static void
 kmem_cache_remove_cache_cb(kmem_cache_cb_t *kcc)
 {
-        unsigned long flags;
-
-        spin_lock_irqsave(&kmem_cache_cb_lock, flags);
+        down_write(&kmem_cache_cb_sem);
+        ASSERT(atomic_read(&kcc->kcc_ref) == 0);
         list_del(&kcc->kcc_list);
-        spin_unlock_irqrestore(&kmem_cache_cb_lock, flags);
+        up_write(&kmem_cache_cb_sem);
 
-        if (kcc)
+        if (kcc) {
+                memset(kcc, KCC_POISON, sizeof(*kcc));
                 kfree(kcc);
+        }
 }
 
 static void
 kmem_cache_generic_constructor(void *ptr, kmem_cache_t *cache, unsigned long flags)
 {
         kmem_cache_cb_t *kcc;
         kmem_constructor_t constructor;
-        unsigned long irqflags;
         void *private;
 
-        spin_lock_irqsave(&kmem_cache_cb_lock, irqflags);
         /* Ensure constructor verifies are not passed to the registered
          * constructors. This may not be safe due to the Solaris constructor
          * not being aware of how to handle the SLAB_CTOR_VERIFY flag
          */
         if (flags & SLAB_CTOR_VERIFY)
                 return;
 
+        /* We can be called with interrupts disabled so it is critical that
+         * this function and the registered constructor never sleep.
+         */
+        while (!down_read_trylock(&kmem_cache_cb_sem));
+
+        /* Callback list must be in sync with linux slab caches */
         kcc = kmem_cache_find_cache_cb(cache);
         ASSERT(kcc);
+        ASSERT(kcc->kcc_magic == KCC_MAGIC);
+        atomic_inc(&kcc->kcc_ref);
 
         constructor = kcc->kcc_constructor;
         private = kcc->kcc_private;
 
-        spin_unlock_irqrestore(&kmem_cache_cb_lock, irqflags);
+        up_read(&kmem_cache_cb_sem);
 
         if (constructor)
                 constructor(ptr, private, (int)flags);
 
+        atomic_dec(&kcc->kcc_ref);
+
         /* Linux constructor has no return code, silently eat it */
 }
@@ -144,23 +174,29 @@ kmem_cache_generic_destructor(void *ptr, kmem_cache_t *cache, unsigned long flags)
 {
         kmem_cache_cb_t *kcc;
         kmem_destructor_t destructor;
-        unsigned long irqflags;
         void *private;
 
-        spin_lock_irqsave(&kmem_cache_cb_lock, irqflags);
+        /* We can be called with interrupts disabled so it is critical that
+         * this function and the registered destructor never sleep.
+         */
+        while (!down_read_trylock(&kmem_cache_cb_sem));
+
+        /* Callback list must be in sync with linux slab caches */
         kcc = kmem_cache_find_cache_cb(cache);
         ASSERT(kcc);
+        ASSERT(kcc->kcc_magic == KCC_MAGIC);
+        atomic_inc(&kcc->kcc_ref);
 
         destructor = kcc->kcc_destructor;
         private = kcc->kcc_private;
 
-        spin_unlock_irqrestore(&kmem_cache_cb_lock, irqflags);
+        up_read(&kmem_cache_cb_sem);
 
         /* Solaris destructor takes no flags, silently eat them */
         if (destructor)
                 destructor(ptr, private);
+
+        atomic_dec(&kcc->kcc_ref);
 }
 
 /* XXX - Arguments are ignored */
@@ -168,7 +204,6 @@ static int
 kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
 {
         kmem_cache_cb_t *kcc;
-        unsigned long flags;
         int total = 0;
 
         /* Under linux a shrinker is not tightly coupled with a slab
@@ -178,9 +213,23 @@ kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
          * function in the shim layer for all slab caches. And we always
          * attempt to shrink all caches when this generic shrinker is called.
          */
-        spin_lock_irqsave(&kmem_cache_cb_lock, flags);
+        down_read(&kmem_cache_cb_sem);
 
         list_for_each_entry(kcc, &kmem_cache_cb_list, kcc_list) {
+                ASSERT(kcc);
+                ASSERT(kcc->kcc_magic == KCC_MAGIC);
+
+                /* Take a reference on the cache in question. If that
+                 * cache is contended simply skip it; it may already be
+                 * in the process of a reclaim, or the ctor/dtor may be
+                 * running. In either case it's best to skip it.
+                 */
+                atomic_inc(&kcc->kcc_ref);
+                if (atomic_read(&kcc->kcc_ref) > 1) {
+                        atomic_dec(&kcc->kcc_ref);
+                        continue;
+                }
+
                 /* Under linux the desired number and gfp type of objects
                  * is passed to the reclaiming function as a suggested reclaim
                  * target. I do not pass these args on because reclaim
@@ -190,6 +239,7 @@ kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
                 if (kcc->kcc_reclaim)
                         kcc->kcc_reclaim(kcc->kcc_private);
 
+                atomic_dec(&kcc->kcc_ref);
                 total += 1;
         }
@@ -199,7 +249,8 @@ kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
          * was registered with the generic shrinker. This should fake out
          * the linux VM when it attempts to shrink caches.
          */
-        spin_lock_irqsave(&kmem_cache_cb_lock, flags);
+        up_read(&kmem_cache_cb_sem);
+
         return total;
 }
@@ -238,18 +289,18 @@ __kmem_cache_create(char *name, size_t size, size_t align,
                 RETURN(NULL);
 
         /* Register shared shrinker function on initial cache create */
-        spin_lock(&kmem_cache_cb_lock);
+        down_read(&kmem_cache_cb_sem);
         if (list_empty(&kmem_cache_cb_list)) {
                 kmem_cache_shrinker = set_shrinker(KMC_DEFAULT_SEEKS,
                                                    kmem_cache_generic_shrinker);
                 if (kmem_cache_shrinker == NULL) {
                         kmem_cache_destroy(cache);
-                        spin_unlock(&kmem_cache_cb_lock);
+                        up_read(&kmem_cache_cb_sem);
                         RETURN(NULL);
                 }
 
         }
-        spin_unlock(&kmem_cache_cb_lock);
+        up_read(&kmem_cache_cb_sem);
 
         kcc = kmem_cache_add_cache_cb(cache, constructor, destructor,
                                       reclaim, priv, vmp);
@@ -272,27 +323,31 @@ __kmem_cache_destroy(kmem_cache_t *cache)
 {
         kmem_cache_cb_t *kcc;
         char *name;
-        unsigned long flags;
         int rc;
         ENTRY;
 
-        spin_lock_irqsave(&kmem_cache_cb_lock, flags);
+        down_read(&kmem_cache_cb_sem);
         kcc = kmem_cache_find_cache_cb(cache);
-        spin_unlock_irqrestore(&kmem_cache_cb_lock, flags);
-        if (kcc == NULL)
+        if (kcc == NULL) {
+                up_read(&kmem_cache_cb_sem);
                 RETURN(-EINVAL);
+        }
+        atomic_inc(&kcc->kcc_ref);
+        up_read(&kmem_cache_cb_sem);
 
         name = (char *)kmem_cache_name(cache);
         rc = kmem_cache_destroy(cache);
 
+        atomic_dec(&kcc->kcc_ref);
         kmem_cache_remove_cache_cb(kcc);
         kfree(name);
 
         /* Unregister generic shrinker on removal of all caches */
-        spin_lock_irqsave(&kmem_cache_cb_lock, flags);
+        down_read(&kmem_cache_cb_sem);
         if (list_empty(&kmem_cache_cb_list))
                 remove_shrinker(kmem_cache_shrinker);
 
-        spin_unlock_irqrestore(&kmem_cache_cb_lock, flags);
+        up_read(&kmem_cache_cb_sem);
         RETURN(rc);
 }
 EXPORT_SYMBOL(__kmem_cache_destroy);
@@ -312,25 +367,101 @@ int
 kmem_init(void)
 {
         ENTRY;
 
+        init_rwsem(&kmem_cache_cb_sem);
+        INIT_LIST_HEAD(&kmem_cache_cb_list);
 #ifdef DEBUG_KMEM
         {
+                int i;
                 atomic64_set(&kmem_alloc_used, 0);
                 atomic64_set(&vmem_alloc_used, 0);
+
+                spin_lock_init(&kmem_lock);
+                INIT_LIST_HEAD(&kmem_list);
+
+                for (i = 0; i < KMEM_TABLE_SIZE; i++)
+                        INIT_HLIST_HEAD(&kmem_table[i]);
         }
 #endif
         RETURN(0);
 }
 
+static char *sprintf_addr(kmem_debug_t *kd, char *str, int len, int min)
+{
+        int size = ((len - 1) < kd->kd_size) ? (len - 1) : kd->kd_size;
+        int i, flag = 1;
+
+        ASSERT(str != NULL && len >= 17);
+        memset(str, 0, len);
+
+        /* Check for a fully printable string, and while we are at
+         * it place the printable characters in the passed buffer. */
+        for (i = 0; i < size; i++) {
+                str[i] = ((char *)(kd->kd_addr))[i];
+                if (isprint(str[i])) {
+                        continue;
+                } else {
+                        /* Minimum number of printable characters found
+                         * to make it worthwhile to print this as ascii. */
+                        if (i > min)
+                                break;
+
+                        flag = 0;
+                        break;
+                }
+        }
+
+        if (!flag) {
+                sprintf(str, "%02x%02x%02x%02x%02x%02x%02x%02x",
+                        *((uint8_t *)kd->kd_addr),
+                        *((uint8_t *)kd->kd_addr + 2),
+                        *((uint8_t *)kd->kd_addr + 4),
+                        *((uint8_t *)kd->kd_addr + 6),
+                        *((uint8_t *)kd->kd_addr + 8),
+                        *((uint8_t *)kd->kd_addr + 10),
+                        *((uint8_t *)kd->kd_addr + 12),
+                        *((uint8_t *)kd->kd_addr + 14));
+        }
+
+        return str;
+}
+
 void
 kmem_fini(void)
 {
         ENTRY;
 #ifdef DEBUG_KMEM
         {
+                unsigned long flags;
+                kmem_debug_t *kd;
+                char str[17];
+
                 if (atomic64_read(&kmem_alloc_used) != 0)
                         CWARN("kmem leaked %ld/%ld bytes\n",
                               atomic64_read(&kmem_alloc_used), kmem_alloc_max);
 
+                /* Display all unreclaimed memory addresses, including the
+                 * allocation size and the first few bytes of what's located
+                 * at that address to aid in debugging. Performance is not
+                 * a serious concern here since it is module unload time. */
+                spin_lock_irqsave(&kmem_lock, flags);
+                if (!list_empty(&kmem_list))
+                        CDEBUG(D_WARNING, "%-16s %-5s %-16s %s:%s\n",
+                               "address", "size", "data", "func", "line");
+
+                list_for_each_entry(kd, &kmem_list, kd_list) {
+                        CDEBUG(D_WARNING, "%p %-5d %-16s %s:%d\n",
+                               kd->kd_addr, kd->kd_size,
+                               sprintf_addr(kd, str, 17, 8),
+                               kd->kd_func, kd->kd_line);
+                }
+                spin_unlock_irqrestore(&kmem_lock, flags);
 
                 if (atomic64_read(&vmem_alloc_used) != 0)
                         CWARN("vmem leaked %ld/%ld bytes\n",
                               atomic64_read(&vmem_alloc_used), vmem_alloc_max);
         }
 #endif
         EXIT;
 }

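The data column in the leak report comes from sprintf_addr() above: show the
allocation's leading bytes as ASCII when enough of them are printable,
otherwise as hex. A standalone user-space rendition of that printable-or-hex
heuristic follows; names are illustrative, not the SPL API, and unlike the
kernel version (which samples every other byte of the first 16) this sketch
dumps the first 8 consecutive bytes and assumes at least 8 are readable.

#include <stdio.h>
#include <string.h>
#include <ctype.h>

/* Render up to 16 leading bytes of a buffer: as ASCII if enough of it
 * is printable, otherwise as a hex dump of the first 8 bytes. */
static const char *fmt_data(const void *addr, size_t size,
                            char *str, size_t len, size_t min)
{
        const unsigned char *p = addr;
        size_t n = size < len - 1 ? size : len - 1;
        size_t i;

        memset(str, 0, len);
        for (i = 0; i < n; i++) {
                str[i] = p[i];
                if (!isprint(p[i])) {
                        if (i > min)    /* long printable prefix: keep ascii */
                                break;
                        /* too little printable data: fall back to hex;
                         * assumes at least 8 bytes are readable here */
                        snprintf(str, len,
                                 "%02x%02x%02x%02x%02x%02x%02x%02x",
                                 p[0], p[1], p[2], p[3],
                                 p[4], p[5], p[6], p[7]);
                        break;
                }
        }
        return str;
}

int main(void)
{
        char buf[17];
        unsigned char raw[8] = { 0x01, 0x00, 0x00, 0x00,
                                 0x01, 0x00, 0x5a, 0x5a };

        /* prints "&tl->tl_lock", like the ascii rows in the report */
        printf("%s\n", fmt_data("&tl->tl_lock", 12, buf, sizeof(buf), 8));
        /* prints "0100000001005a5a", like the hex rows in the report */
        printf("%s\n", fmt_data(raw, sizeof(raw), buf, sizeof(buf), 8));
        return 0;
}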
@@ -29,13 +29,15 @@ int mutex_spin_max = 100;
 
 #ifdef DEBUG_MUTEX
 int mutex_stats[MUTEX_STATS_SIZE] = { 0 };
-DEFINE_MUTEX(mutex_stats_lock);
+struct rw_semaphore mutex_stats_sem;
 LIST_HEAD(mutex_stats_list);
 #endif
 
 void
 __spl_mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
 {
+        int flags = KM_SLEEP;
+
         ASSERT(mp);
         ASSERT(name);
         ASSERT(ibc == NULL);
@@ -58,12 +60,18 @@ __spl_mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
                 SBUG();
         }
 
+        /* We may be called when there is a non-zero preempt_count or
+         * interrupts are disabled, in which case we must not sleep.
+         */
+        if (current_thread_info()->preempt_count || irqs_disabled())
+                flags = KM_NOSLEEP;
+
         /* Semaphore kmem_alloc'ed to keep struct size down (<64b) */
-        mp->km_sem = kmem_alloc(sizeof(struct semaphore), KM_SLEEP);
+        mp->km_sem = kmem_alloc(sizeof(struct semaphore), flags);
         if (mp->km_sem == NULL)
                 return;
 
-        mp->km_name = kmem_alloc(mp->km_name_size, KM_SLEEP);
+        mp->km_name = kmem_alloc(mp->km_name_size, flags);
         if (mp->km_name == NULL) {
                 kmem_free(mp->km_sem, sizeof(struct semaphore));
                 return;
@@ -73,16 +81,19 @@ __spl_mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
         strcpy(mp->km_name, name);
 
 #ifdef DEBUG_MUTEX
-        mp->km_stats = kmem_zalloc(sizeof(int) * MUTEX_STATS_SIZE, KM_SLEEP);
+        mp->km_stats = kmem_zalloc(sizeof(int) * MUTEX_STATS_SIZE, flags);
         if (mp->km_stats == NULL) {
                 kmem_free(mp->km_name, mp->km_name_size);
                 kmem_free(mp->km_sem, sizeof(struct semaphore));
                 return;
         }
 
-        mutex_lock(&mutex_stats_lock);
+        /* We may be called when there is a non-zero preempt_count or
+         * interrupts are disabled, in which case we must not sleep.
+         */
+        while (!down_write_trylock(&mutex_stats_sem));
         list_add_tail(&mp->km_list, &mutex_stats_list);
-        mutex_unlock(&mutex_stats_lock);
+        up_write(&mutex_stats_sem);
 #endif
 }
 EXPORT_SYMBOL(__spl_mutex_init);
@@ -94,9 +105,12 @@ __spl_mutex_destroy(kmutex_t *mp)
         ASSERT(mp->km_magic == KM_MAGIC);
 
 #ifdef DEBUG_MUTEX
-        mutex_lock(&mutex_stats_lock);
+        /* We may be called when there is a non-zero preempt_count or
+         * interrupts are disabled, in which case we must not sleep.
+         */
+        while (!down_write_trylock(&mutex_stats_sem));
         list_del_init(&mp->km_list);
-        mutex_unlock(&mutex_stats_lock);
+        up_write(&mutex_stats_sem);
 
         kmem_free(mp->km_stats, sizeof(int) * MUTEX_STATS_SIZE);
 #endif

@@ -426,7 +426,7 @@ mutex_seq_start(struct seq_file *f, loff_t *pos)
         loff_t n = *pos;
         ENTRY;
 
-        mutex_lock(&mutex_stats_lock);
+        down_read(&mutex_stats_sem);
         if (!n)
                 mutex_seq_show_headers(f);
@@ -454,7 +454,7 @@ mutex_seq_next(struct seq_file *f, void *p, loff_t *pos)
 static void
 mutex_seq_stop(struct seq_file *f, void *v)
 {
-        mutex_unlock(&mutex_stats_lock);
+        up_read(&mutex_stats_sem);
 }
 
 static struct seq_operations mutex_seq_ops = {
@@ -306,6 +306,7 @@ splat_kmem_test4(struct file *file, void *arg)
         }
 
         max = kcp.kcp_count;
+        ASSERT(max > 0);
 
         /* Force shrinker to run */
         kmem_reap();