Add vmem memory accounting

git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@99 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c
behlendo 2008-05-07 18:54:32 +00:00
parent 404992e31a
commit 13cdca65ec
2 changed files with 103 additions and 33 deletions
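
The commit mirrors the existing kmem accounting for vmalloc-backed allocations: every vmem_alloc() bumps an atomic byte counter (vmem_alloc_used), tracks a high-water mark (vmem_alloc_max), and files a kmem_debug_t record in a pointer-keyed hash table so vmem_free() can verify the caller-supplied size and kmem_fini() can report leaks at module unload. The sketch below is a minimal userspace model of that bookkeeping, not the commit's code; tracked_alloc(), tracked_free(), leak_report() and the other names are illustrative only.

/* Userspace model of the per-allocation accounting added by this commit
 * (illustrative names; not the SPL implementation). */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>

#define TABLE_SIZE 1024                       /* analogous to VMEM_TABLE_SIZE */

struct debug_rec {                            /* analogous to kmem_debug_t */
	struct debug_rec *next;               /* hash-chain linkage */
	void *addr;                           /* tracked address */
	size_t size;                          /* tracked size */
	const char *func;                     /* allocating function */
	int line;                             /* allocating line */
};

static struct debug_rec *table[TABLE_SIZE];
static long long alloc_used;                  /* bytes currently allocated */
static long long alloc_max;                   /* high-water mark */

static unsigned hash_ptr(void *p)
{
	return (unsigned)(((uintptr_t)p >> 4) % TABLE_SIZE);
}

static void *tracked_alloc(size_t size, const char *func, int line)
{
	/* Debug record is allocated before the buffer, mirroring the commit's ordering. */
	struct debug_rec *d = malloc(sizeof(*d));
	void *p = d ? malloc(size) : NULL;

	if (p == NULL) {
		free(d);
		return NULL;
	}

	alloc_used += size;
	if (alloc_used > alloc_max)
		alloc_max = alloc_used;

	d->addr = p; d->size = size; d->func = func; d->line = line;
	d->next = table[hash_ptr(p)];
	table[hash_ptr(p)] = d;
	return p;
}

static void tracked_free(void *p, size_t size)
{
	struct debug_rec **pp = &table[hash_ptr(p)], *d;

	for (; (d = *pp) != NULL; pp = &d->next)
		if (d->addr == p)
			break;

	assert(d != NULL);            /* must exist, like ASSERT(_dptr_) in vmem_free() */
	assert(d->size == size);      /* caller-supplied size must match the record */

	*pp = d->next;                /* unlink from the hash chain */
	alloc_used -= size;
	free(d);
	free(p);
}

static void leak_report(void)                 /* analogous to the kmem_fini() walk */
{
	unsigned i;
	for (i = 0; i < TABLE_SIZE; i++)
		for (struct debug_rec *d = table[i]; d; d = d->next)
			printf("leaked %p %zu bytes from %s:%d\n",
			       d->addr, d->size, d->func, d->line);
}

int main(void)
{
	void *a = tracked_alloc(100, __func__, __LINE__);
	tracked_alloc(50, __func__, __LINE__);    /* deliberately leaked */
	tracked_free(a, 100);

	printf("used=%lld max=%lld\n", alloc_used, alloc_max);  /* 50 / 150 */
	leak_report();                            /* reports the 50-byte leak */
	return 0;
}
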

View File

@@ -41,6 +41,13 @@ extern struct hlist_head kmem_table[KMEM_TABLE_SIZE];
extern struct list_head kmem_list;
extern spinlock_t kmem_lock;
#define VMEM_HASH_BITS 10
#define VMEM_TABLE_SIZE (1 << VMEM_HASH_BITS)
extern struct hlist_head vmem_table[VMEM_TABLE_SIZE];
extern struct list_head vmem_list;
extern spinlock_t vmem_lock;
typedef struct kmem_debug {
struct hlist_node kd_hlist; /* Hash node linkage */
struct list_head kd_list; /* List of all allocations */
@@ -51,25 +58,25 @@ typedef struct kmem_debug {
} kmem_debug_t;
static __inline__ kmem_debug_t *
__kmem_del_init(void *addr)
__kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits, void *addr)
{
struct hlist_head *head;
struct hlist_node *node;
struct kmem_debug *p;
unsigned long flags;
spin_lock_irqsave(&kmem_lock, flags);
head = &kmem_table[hash_ptr(addr, KMEM_HASH_BITS)];
spin_lock_irqsave(lock, flags);
head = &table[hash_ptr(addr, bits)];
hlist_for_each_entry_rcu(p, node, head, kd_hlist) {
if (p->kd_addr == addr) {
hlist_del_init(&p->kd_hlist);
list_del_init(&p->kd_list);
spin_unlock_irqrestore(&kmem_lock, flags);
spin_unlock_irqrestore(lock, flags);
return p;
}
}
spin_unlock_irqrestore(&kmem_lock, flags);
spin_unlock_irqrestore(lock, flags);
return NULL;
}
@@ -138,7 +145,7 @@ __kmem_del_init(void *addr)
kmem_debug_t *_dptr_; \
ASSERT((ptr) || (size > 0)); \
\
_dptr_ = __kmem_del_init(ptr); \
_dptr_ = __kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr);\
ASSERT(_dptr_); /* Must exist in hash due to kmem_alloc() */ \
ASSERTF(_dptr_->kd_size == (size), "kd_size (%d) != size (%d), " \
"kd_func = %s, kd_line = %d\n", _dptr_->kd_size, (size), \
@@ -156,31 +163,56 @@ __kmem_del_init(void *addr)
})
#define __vmem_alloc(size, flags) \
({ void *_ptr_; \
({ void *_ptr_ = NULL; \
kmem_debug_t *_dptr_; \
unsigned long _flags_; \
\
ASSERT(flags & KM_SLEEP); \
ASSERT((flags) & KM_SLEEP); \
\
_ptr_ = (void *)__vmalloc((size), \
(((flags) | __GFP_HIGHMEM) & ~__GFP_ZERO), \
PAGE_KERNEL); \
if (_ptr_ == NULL) { \
_dptr_ = (kmem_debug_t *)kmalloc(sizeof(kmem_debug_t), (flags)); \
if (_dptr_ == NULL) { \
__CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning " \
"vmem_alloc(%d, 0x%x) failed (%ld/%ld)\n", \
(int)(size), (int)(flags), \
atomic64_read(&vmem_alloc_used), \
vmem_alloc_max); \
"vmem_alloc(%d, 0x%x) debug failed\n", \
sizeof(kmem_debug_t), (int)(flags)); \
} else { \
if (flags & __GFP_ZERO) \
memset(_ptr_, 0, (size)); \
_ptr_ = (void *)__vmalloc((size), (((flags) | \
__GFP_HIGHMEM) & ~__GFP_ZERO), \
PAGE_KERNEL); \
if (_ptr_ == NULL) { \
kfree(_dptr_); \
__CDEBUG_LIMIT(S_KMEM, D_WARNING, "Warning " \
"vmem_alloc(%d, 0x%x) failed (%ld/" \
"%ld)\n", (int)(size), (int)(flags), \
atomic64_read(&vmem_alloc_used), \
vmem_alloc_max); \
} else { \
if (flags & __GFP_ZERO) \
memset(_ptr_, 0, (size)); \
\
atomic64_add((size), &vmem_alloc_used); \
if (unlikely(atomic64_read(&vmem_alloc_used)>vmem_alloc_max)) \
vmem_alloc_max = atomic64_read(&vmem_alloc_used); \
atomic64_add((size), &vmem_alloc_used); \
if (unlikely(atomic64_read(&vmem_alloc_used) > \
vmem_alloc_max)) \
vmem_alloc_max = \
atomic64_read(&vmem_alloc_used); \
\
INIT_HLIST_NODE(&_dptr_->kd_hlist); \
INIT_LIST_HEAD(&_dptr_->kd_list); \
_dptr_->kd_addr = _ptr_; \
_dptr_->kd_size = (size); \
_dptr_->kd_func = __FUNCTION__; \
_dptr_->kd_line = __LINE__; \
spin_lock_irqsave(&vmem_lock, _flags_); \
hlist_add_head_rcu(&_dptr_->kd_hlist, \
&vmem_table[hash_ptr(_ptr_, VMEM_HASH_BITS)]);\
list_add_tail(&_dptr_->kd_list, &vmem_list); \
spin_unlock_irqrestore(&vmem_lock, _flags_); \
\
__CDEBUG_LIMIT(S_KMEM, D_INFO, "vmem_alloc(%d, 0x%x) = %p " \
"(%ld/%ld)\n", (int)(size), (int)(flags), \
_ptr_, atomic64_read(&vmem_alloc_used), \
vmem_alloc_max); \
__CDEBUG_LIMIT(S_KMEM, D_INFO, "vmem_alloc(" \
"%d, 0x%x) = %p (%ld/%ld)\n", \
(int)(size), (int)(flags), _ptr_, \
atomic64_read(&vmem_alloc_used), \
vmem_alloc_max); \
} \
} \
\
_ptr_; \
@@ -192,12 +224,23 @@ __kmem_del_init(void *addr)
#define vmem_free(ptr, size) \
({ \
kmem_debug_t *_dptr_; \
ASSERT((ptr) || (size > 0)); \
\
_dptr_ = __kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr);\
ASSERT(_dptr_); /* Must exist in hash due to vmem_alloc() */ \
ASSERTF(_dptr_->kd_size == (size), "kd_size (%d) != size (%d), " \
"kd_func = %s, kd_line = %d\n", _dptr_->kd_size, (size), \
_dptr_->kd_func, _dptr_->kd_line); /* Size must match */ \
atomic64_sub((size), &vmem_alloc_used); \
__CDEBUG_LIMIT(S_KMEM, D_INFO, "vmem_free(%p, %d) (%ld/%ld)\n", \
(ptr), (int)(size), atomic64_read(&vmem_alloc_used), \
vmem_alloc_max); \
memset(ptr, 0x5a, (size)); /* Poison */ \
\
memset(_dptr_, 0x5a, sizeof(kmem_debug_t)); \
kfree(_dptr_); \
\
memset(ptr, 0x5a, (size)); \
vfree(ptr); \
})
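
With these definitions in place, callers on a DEBUG_KMEM build use vmem_alloc()/vmem_free() exactly as before; the accounting is transparent, but the size passed to vmem_free() must match the size given to vmem_alloc(), and any allocation left outstanding at unload shows up in the kmem_fini() report. Below is a hedged caller-side sketch; my_buf, MY_BUF_SIZE, my_setup() and my_teardown() are made-up names, and the SPL header providing vmem_alloc()/vmem_free() is assumed to be in scope.

/* Illustrative usage sketch only; identifiers here are hypothetical. */
#define MY_BUF_SIZE	(128 * 1024)

static void *my_buf;

static int my_setup(void)
{
	my_buf = vmem_alloc(MY_BUF_SIZE, KM_SLEEP);   /* counted in vmem_alloc_used */
	if (my_buf == NULL)
		return -ENOMEM;

	return 0;
}

static void my_teardown(void)
{
	/* The size must match the allocation or the ASSERTF in vmem_free()
	 * trips; omitting this call leaves the buffer in the leak report. */
	vmem_free(my_buf, MY_BUF_SIZE);
}

Passing the size to vmem_free() is what lets the debug build cross-check it against the recorded kd_size without relying on any vmalloc metadata.
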

View File

@@ -21,6 +21,10 @@ spinlock_t kmem_lock;
struct hlist_head kmem_table[KMEM_TABLE_SIZE];
struct list_head kmem_list;
spinlock_t vmem_lock;
struct hlist_head vmem_table[VMEM_TABLE_SIZE];
struct list_head vmem_list;
EXPORT_SYMBOL(kmem_alloc_used);
EXPORT_SYMBOL(kmem_alloc_max);
EXPORT_SYMBOL(vmem_alloc_used);
@@ -31,6 +35,10 @@ EXPORT_SYMBOL(kmem_lock);
EXPORT_SYMBOL(kmem_table);
EXPORT_SYMBOL(kmem_list);
EXPORT_SYMBOL(vmem_lock);
EXPORT_SYMBOL(vmem_table);
EXPORT_SYMBOL(vmem_list);
int kmem_set_warning(int flag) { return (kmem_warning_flag = !!flag); }
#else
int kmem_set_warning(int flag) { return 0; }
@@ -381,6 +389,12 @@ kmem_init(void)
for (i = 0; i < KMEM_TABLE_SIZE; i++)
INIT_HLIST_HEAD(&kmem_table[i]);
spin_lock_init(&vmem_lock);
INIT_LIST_HEAD(&vmem_list);
for (i = 0; i < VMEM_TABLE_SIZE; i++)
INIT_HLIST_HEAD(&vmem_table[i]);
}
#endif
RETURN(0);
@@ -437,30 +451,43 @@ kmem_fini(void)
kmem_debug_t *kd;
char str[17];
if (atomic64_read(&kmem_alloc_used) != 0)
CWARN("kmem leaked %ld/%ld bytes\n",
atomic64_read(&kmem_alloc_used), kmem_alloc_max);
/* Display all unreclaimed memory addresses, including the
* allocation size and the first few bytes of what's located
* at that address to aid in debugging. Performance is not
* a serious concern here since it is module unload time. */
if (atomic64_read(&kmem_alloc_used) != 0)
CWARN("kmem leaked %ld/%ld bytes\n",
atomic64_read(&kmem_alloc_used), kmem_alloc_max);
spin_lock_irqsave(&kmem_lock, flags);
if (!list_empty(&kmem_list))
CDEBUG(D_WARNING, "%-16s %-5s %-16s %s:%s\n",
"address", "size", "data", "func", "line");
list_for_each_entry(kd, &kmem_list, kd_list) {
list_for_each_entry(kd, &kmem_list, kd_list)
CDEBUG(D_WARNING, "%p %-5d %-16s %s:%d\n",
kd->kd_addr, kd->kd_size,
kd->kd_addr, kd->kd_size,
sprintf_addr(kd, str, 17, 8),
kd->kd_func, kd->kd_line);
}
spin_unlock_irqrestore(&kmem_lock, flags);
if (atomic64_read(&vmem_alloc_used) != 0)
CWARN("vmem leaked %ld/%ld bytes\n",
atomic64_read(&vmem_alloc_used), vmem_alloc_max);
spin_lock_irqsave(&vmem_lock, flags);
if (!list_empty(&vmem_list))
CDEBUG(D_WARNING, "%-16s %-5s %-16s %s:%s\n",
"address", "size", "data", "func", "line");
list_for_each_entry(kd, &vmem_list, kd_list)
CDEBUG(D_WARNING, "%p %-5d %-16s %s:%d\n",
kd->kd_addr, kd->kd_size,
sprintf_addr(kd, str, 17, 8),
kd->kd_func, kd->kd_line);
spin_unlock_irqrestore(&vmem_lock, flags);
}
#endif
EXIT;