Fixes:

1) Ensure mutex_init() never fails in the case of ENOMEM by retrying
   forever. I don't think I've ever seen this happen, but it was clear
   after code inspection that if it did we would immediately crash.

2) Enable full debugging in check.sh for sanity tests. We might as well
   get as much debug output as we can in the case of a failure.

3) Reworked the list of kmem caches tracked by the SPL into a hash with
   the key based on the address of the kmem_cache_t. This should speed
   up the constructor/destructor/shrinker lookup needed for newer
   kernels, which removed destructor support.

4) Updated kmem_cache_create() to handle the case where CONFIG_SLUB is
   defined. The slub would occasionally merge slab caches, which
   resulted in non-unique keys for our hash lookup in 3). To fix this we
   detect when the slub is enabled and set the needed flag to prevent
   this merging from ever occurring.

5) Newer kernels removed the proc_dir_entry pointer from items
   registered by sysctl. This means we can no longer be sneaky and
   manually insert things into the sysctl tree simply by walking the
   proc tree. So I'm forced to create a separate tree for all the things
   I can't easily support via the sysctl interface. I don't like it, but
   it will do for now.

git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@124 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c
parent 691d2bd733
commit c30df9c863
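To make fix 3) concrete, here is a minimal userspace sketch of a pointer-keyed hash lookup in the spirit of the new kmem_cache_cb table below. The kernel's hash_ptr() is approximated with a simple multiplicative hash, and every name here (hash_ptr_demo, cache_cb, cb_find) is illustrative rather than SPL's actual code:

#include <stdint.h>
#include <stdio.h>

#define HASH_BITS	10
#define TABLE_SIZE	(1u << HASH_BITS)

/* Rough stand-in for the kernel's hash_ptr(): fold the pointer value
 * down to HASH_BITS bits with a Fibonacci-style multiplicative hash. */
static unsigned hash_ptr_demo(const void *ptr)
{
	return (unsigned)(((uintptr_t)ptr * 0x9E3779B97F4A7C15ull)
	       >> (64 - HASH_BITS));
}

/* One registered cache: keyed by the cache's own address, carrying the
 * constructor pointer that must be looked up on every allocation. */
struct cache_cb {
	const void *cache;		/* hash key */
	void (*constructor)(void *);
	struct cache_cb *next;		/* hash chain */
};

static struct cache_cb *table[TABLE_SIZE];

static void cb_add(struct cache_cb *cb)
{
	unsigned h = hash_ptr_demo(cb->cache);
	cb->next = table[h];
	table[h] = cb;
}

static struct cache_cb *cb_find(const void *cache)
{
	struct cache_cb *cb;
	for (cb = table[hash_ptr_demo(cache)]; cb; cb = cb->next)
		if (cb->cache == cache)	/* opaque type: match by address */
			return cb;
	return NULL;
}

static void demo_ctor(void *obj) { (void)obj; }

int main(void)
{
	int fake_cache;	/* any unique address works as a key */
	struct cache_cb cb = { &fake_cache, demo_ctor, NULL };

	cb_add(&cb);
	printf("found: %d\n", cb_find(&fake_cache) == &cb);
	return 0;
}

Hashing the cache address replaces a linear list walk with an O(1) bucket scan, which matters because the lookup now runs on every constructor call.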
@@ -280,6 +280,17 @@ AC_DEFUN([SPL_CHECK_SYMBOL_EXPORT],
 	fi
 ])
 
+dnl #
+dnl # 2.6.x API change
+dnl # Slab can now be implemented in terms of the Slub which provides
+dnl # slightly different semantics in terms of merged caches.
+dnl #
+AC_DEFUN([SPL_AC_SLUB], [
+	SPL_LINUX_CONFIG([SLUB],
+		[AC_DEFINE(HAVE_SLUB, 1, [slub support configured])],
+		[])
+])
+
 dnl #
 dnl # 2.6.x API change
 dnl # check if uintptr_t typedef is defined

@@ -43,6 +43,7 @@ SPL_AC_DEBUG_KMEM
 SPL_AC_DEBUG_MUTEX
 SPL_AC_DEBUG_KSTAT
 SPL_AC_DEBUG_CALLB
+SPL_AC_SLUB
 SPL_AC_TYPE_UINTPTR_T
 SPL_AC_TYPE_KMEM_CACHE_T
 SPL_AC_KMEM_CACHE_DESTROY_INT

@@ -291,7 +291,7 @@ do { \
 	if (unlikely(!(cond))) { \
 		spl_debug_msg(NULL, DEBUG_SUBSYSTEM, D_EMERG, \
 		              __FILE__, __FUNCTION__, __LINE__, \
-		              "ASSERTION(" #cond ") failed:" fmt, \
+		              "ASSERTION(" #cond ") failed: " fmt, \
 		              ## a); \
 		SBUG(); \
 	} \

@@ -78,7 +78,7 @@ extern struct list_head mutex_stats_list;
 int spl_mutex_init(void);
 void spl_mutex_fini(void);
 
-extern void __spl_mutex_init(kmutex_t *mp, char *name, int type, void *ibc);
+extern int __spl_mutex_init(kmutex_t *mp, char *name, int type, void *ibc);
 extern void __spl_mutex_destroy(kmutex_t *mp);
 extern int __mutex_tryenter(kmutex_t *mp);
 extern void __mutex_enter(kmutex_t *mp);

@@ -91,10 +91,11 @@ extern kthread_t *__spl_mutex_owner(kmutex_t *mp);
 
 #define mutex_init(mp, name, type, ibc)                                 \
 ({                                                                      \
+	/* May never fail or all subsequent mutex_* calls will ASSERT */\
 	if ((name) == NULL)                                             \
-		__spl_mutex_init(mp, #mp, type, ibc);                   \
+		while(__spl_mutex_init(mp, #mp, type, ibc));            \
 	else                                                            \
-		__spl_mutex_init(mp, name, type, ibc);                  \
+		while(__spl_mutex_init(mp, name, type, ibc));           \
 })
 #define mutex_destroy(mp)       __spl_mutex_destroy(mp)
 #define mutex_tryenter(mp)      __mutex_tryenter(mp)
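The retry-forever change above leans on GCC statement expressions: __spl_mutex_init() now returns non-zero on ENOMEM and the macro simply spins until it succeeds. A minimal userspace sketch of the same pattern, with a hypothetical failable initializer standing in for __spl_mutex_init():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical failable initializer: returns 0 on success, -ENOMEM on
 * allocation failure, mirroring the new __spl_mutex_init() contract. */
struct fake_mutex { char *name; };

static int fake_mutex_init(struct fake_mutex *mp, const char *name)
{
	mp->name = strdup(name);
	return (mp->name == NULL) ? -ENOMEM : 0;
}

/* Spin until init succeeds, like the while(__spl_mutex_init(...))
 * macro above.  Only sane because the failure mode (a transient
 * allocation failure) is expected to clear on retry. */
#define fake_mutex_init_retry(mp, name)         \
({                                              \
	while (fake_mutex_init(mp, name))       \
		;                               \
})

int main(void)
{
	struct fake_mutex m;

	fake_mutex_init_retry(&m, "demo");
	printf("initialized: %s\n", m.name);
	free(m.name);
	return 0;
}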
@@ -49,7 +49,7 @@
 #endif /* CONFIG_SYSCTL */
 
 #ifdef DEBUG_KSTAT
-extern struct proc_dir_entry *proc_sys_spl_kstat;
+extern struct proc_dir_entry *proc_spl_kstat;
 struct proc_dir_entry *proc_dir_entry_find(struct proc_dir_entry *root,
                                            const char *str);
 int proc_dir_entries(struct proc_dir_entry *root);

@@ -92,6 +92,7 @@ EXPORT_SYMBOL(kmem_set_warning);
 
 typedef struct kmem_cache_cb {
 	int			kcc_magic;
+	struct hlist_node	kcc_hlist;
 	struct list_head	kcc_list;
 	kmem_cache_t *		kcc_cache;
 	kmem_constructor_t	kcc_constructor;

@@ -102,8 +103,13 @@ typedef struct kmem_cache_cb {
 	atomic_t		kcc_ref;
 } kmem_cache_cb_t;
 
-static struct rw_semaphore kmem_cache_cb_sem;
-static struct list_head kmem_cache_cb_list;
+#define KMEM_CACHE_HASH_BITS	10
+#define KMEM_CACHE_TABLE_SIZE	(1 << KMEM_CACHE_HASH_BITS)
+
+struct hlist_head kmem_cache_table[KMEM_CACHE_TABLE_SIZE];
+struct list_head kmem_cache_list;
+static struct rw_semaphore kmem_cache_sem;
 
 #ifdef HAVE_SET_SHRINKER
 static struct shrinker *kmem_cache_shrinker;
 #else

@@ -114,20 +120,23 @@ static struct shrinker kmem_cache_shrinker = {
 };
 #endif
 
-/* Function must be called while holding the kmem_cache_cb_sem
+/* Function must be called while holding the kmem_cache_sem
  * Because kmem_cache_t is an opaque datatype we're forced to
  * match pointers to identify specific cache entires.
  */
 static kmem_cache_cb_t *
 kmem_cache_find_cache_cb(kmem_cache_t *cache)
 {
+	struct hlist_head *head;
+	struct hlist_node *node;
 	kmem_cache_cb_t *kcc;
 #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
-	ASSERT(rwsem_is_locked(&kmem_cache_cb_sem));
+	ASSERT(rwsem_is_locked(&kmem_cache_sem));
 #endif
 
-	list_for_each_entry(kcc, &kmem_cache_cb_list, kcc_list)
-		if (cache == kcc->kcc_cache)
+	head = &kmem_cache_table[hash_ptr(cache, KMEM_CACHE_HASH_BITS)];
+	hlist_for_each_entry_rcu(kcc, node, head, kcc_hlist)
+		if (kcc->kcc_cache == cache)
 			return kcc;
 
 	return NULL;

@@ -152,9 +161,11 @@ kmem_cache_add_cache_cb(kmem_cache_t *cache,
 		kcc->kcc_private = priv;
 		kcc->kcc_vmp = vmp;
 		atomic_set(&kcc->kcc_ref, 0);
-		down_write(&kmem_cache_cb_sem);
-		list_add(&kcc->kcc_list, &kmem_cache_cb_list);
-		up_write(&kmem_cache_cb_sem);
+		down_write(&kmem_cache_sem);
+		hlist_add_head_rcu(&kcc->kcc_hlist, &kmem_cache_table[
+		                   hash_ptr(cache, KMEM_CACHE_HASH_BITS)]);
+		list_add_tail(&kcc->kcc_list, &kmem_cache_list);
+		up_write(&kmem_cache_sem);
 	}
 
 	return kcc;

@@ -163,12 +174,13 @@ kmem_cache_add_cache_cb(kmem_cache_t *cache,
 static void
 kmem_cache_remove_cache_cb(kmem_cache_cb_t *kcc)
 {
-	down_write(&kmem_cache_cb_sem);
+	down_write(&kmem_cache_sem);
 	ASSERT(atomic_read(&kcc->kcc_ref) == 0);
-	list_del(&kcc->kcc_list);
-	up_write(&kmem_cache_cb_sem);
+	hlist_del_init(&kcc->kcc_hlist);
+	list_del_init(&kcc->kcc_list);
+	up_write(&kmem_cache_sem);
 
-	if (kcc){
+	if (kcc) {
 		memset(kcc, KCC_POISON, sizeof(*kcc));
 		kfree(kcc);
 	}

@@ -208,7 +220,7 @@ kmem_cache_generic_constructor(kmem_cache_t *cache, void *ptr)
 	/* We can be called with interrupts disabled so it is critical that
 	 * this function and the registered constructor never sleep.
 	 */
-	while (!down_read_trylock(&kmem_cache_cb_sem));
+	while (!down_read_trylock(&kmem_cache_sem));
 
 	/* Callback list must be in sync with linux slab caches */
 	kcc = kmem_cache_find_cache_cb(cache);
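The while (!down_read_trylock(...)) idiom above exists because the constructor can run with interrupts disabled, where blocking on the semaphore is forbidden; instead it busy-waits on the reader trylock. A userspace analog of that pattern, using a pthread rwlock as a stand-in (this is a sketch, not SPL code):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

static void lookup_without_sleeping(void)
{
	/* Caller may not sleep, so spin on the reader trylock rather
	 * than block; tolerable only because writers (cache add and
	 * remove) hold the lock very briefly. */
	while (pthread_rwlock_tryrdlock(&table_lock) != 0)
		;

	/* ... the hash table lookup would happen here ... */
	pthread_rwlock_unlock(&table_lock);
}

int main(void)
{
	lookup_without_sleeping();
	printf("lookup done\n");
	return 0;
}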
@@ -219,7 +231,7 @@ kmem_cache_generic_constructor(kmem_cache_t *cache, void *ptr)
 	constructor = kcc->kcc_constructor;
 	private = kcc->kcc_private;
 
-	up_read(&kmem_cache_cb_sem);
+	up_read(&kmem_cache_sem);
 
 	if (constructor)
 		constructor(ptr, private, (int)flags);

@@ -242,7 +254,7 @@ kmem_cache_generic_destructor(void *ptr, kmem_cache_t *cache, unsigned long flags)
 	/* We can be called with interrupts disabled so it is critical that
 	 * this function and the registered constructor never sleep.
 	 */
-	while (!down_read_trylock(&kmem_cache_cb_sem));
+	while (!down_read_trylock(&kmem_cache_sem));
 
 	/* Callback list must be in sync with linux slab caches */
 	kcc = kmem_cache_find_cache_cb(cache);

@@ -253,7 +265,7 @@ kmem_cache_generic_destructor(void *ptr, kmem_cache_t *cache, unsigned long flags)
 	destructor = kcc->kcc_destructor;
 	private = kcc->kcc_private;
 
-	up_read(&kmem_cache_cb_sem);
+	up_read(&kmem_cache_sem);
 
 	/* Solaris destructor takes no flags, silently eat them */
 	if (destructor)

@@ -276,9 +288,9 @@ kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
 	 * function in the shim layer for all slab caches. And we always
 	 * attempt to shrink all caches when this generic shrinker is called.
 	 */
-	down_read(&kmem_cache_cb_sem);
+	down_read(&kmem_cache_sem);
 
-	list_for_each_entry(kcc, &kmem_cache_cb_list, kcc_list) {
+	list_for_each_entry(kcc, &kmem_cache_list, kcc_list) {
 		ASSERT(kcc);
 		ASSERT(kcc->kcc_magic == KCC_MAGIC);
 

@@ -312,7 +324,7 @@ kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
 	 * was registered with the generic shrinker. This should fake out
 	 * the linux VM when it attempts to shrink caches.
 	 */
-	up_read(&kmem_cache_cb_sem);
+	up_read(&kmem_cache_sem);
 
 	return total;
 }

@@ -349,6 +361,25 @@ __kmem_cache_create(char *name, size_t size, size_t align,
 
 	strcpy(cache_name, name);
 
+	/* When your slab is implemented in terms of the slub it
+	 * is possible similarly sized slab caches will be merged.
+	 * For our implementation we must make sure this never
+	 * happens because we require a unique cache address to
+	 * use as a hash key when looking up the constructor,
+	 * destructor, and shrinker registered for each unique
+	 * type of slab cache.  Passing any of the following flags
+	 * will prevent the slub merging.
+	 *
+	 *	SLAB_RED_ZONE
+	 *	SLAB_POISON
+	 *	SLAB_STORE_USER
+	 *	SLAB_TRACE
+	 *	SLAB_DESTROY_BY_RCU
+	 */
+#ifdef HAVE_SLUB
+	flags |= SLAB_STORE_USER;
+#endif
+
 #ifdef HAVE_KMEM_CACHE_CREATE_DTOR
 	cache = kmem_cache_create(cache_name, size, align, flags,
 	                          kmem_cache_generic_constructor,
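The comment above is the heart of fix 4): a pointer-keyed registry only works if every logical cache has its own kmem_cache_t address. A toy userspace model of why merging breaks that, where a hypothetical allocator hands back one shared handle for equal-sized caches the way the slub may (toy_cache_create and its no_merge flag are invented for illustration, not a real kernel API):

#include <stdio.h>

struct toy_cache { int size; };
static struct toy_cache shared_64 = { 64 };

static struct toy_cache *toy_cache_create(int size, int no_merge)
{
	static struct toy_cache private_caches[8];
	static int used;

	if (!no_merge && size == 64)
		return &shared_64;	/* merged: same pointer returned */
	private_caches[used].size = size;
	return &private_caches[used++];
}

int main(void)
{
	/* With merging, both "caches" share one address, so a registry
	 * keyed on the cache pointer cannot tell them apart. */
	struct toy_cache *a = toy_cache_create(64, 0);
	struct toy_cache *b = toy_cache_create(64, 0);
	printf("merged:    a==b? %d\n", a == b);

	/* A merge-preventing flag (SLAB_STORE_USER plays this role in
	 * the patch above) restores a unique address per cache. */
	a = toy_cache_create(64, 1);
	b = toy_cache_create(64, 1);
	printf("unmerged:  a==b? %d\n", a == b);
	return 0;
}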
@@ -360,22 +391,21 @@ __kmem_cache_create(char *name, size_t size, size_t align,
 		RETURN(NULL);
 
 	/* Register shared shrinker function on initial cache create */
-	down_read(&kmem_cache_cb_sem);
-	if (list_empty(&kmem_cache_cb_list)) {
+	down_read(&kmem_cache_sem);
+	if (list_empty(&kmem_cache_list)) {
 #ifdef HAVE_SET_SHRINKER
-		kmem_cache_shrinker =
-			set_shrinker(KMC_DEFAULT_SEEKS,
-				     kmem_cache_generic_shrinker);
+		kmem_cache_shrinker = set_shrinker(KMC_DEFAULT_SEEKS,
+		                                   kmem_cache_generic_shrinker);
 		if (kmem_cache_shrinker == NULL) {
 			kmem_cache_destroy(cache);
-			up_read(&kmem_cache_cb_sem);
+			up_read(&kmem_cache_sem);
 			RETURN(NULL);
 		}
 #else
 		register_shrinker(&kmem_cache_shrinker);
 #endif
 	}
-	up_read(&kmem_cache_cb_sem);
+	up_read(&kmem_cache_sem);
 
 	kcc = kmem_cache_add_cache_cb(cache, constructor, destructor,
 	                              reclaim, priv, vmp);

@@ -405,14 +435,14 @@ __kmem_cache_destroy(kmem_cache_t *cache)
 	int rc;
 	ENTRY;
 
-	down_read(&kmem_cache_cb_sem);
+	down_read(&kmem_cache_sem);
 	kcc = kmem_cache_find_cache_cb(cache);
 	if (kcc == NULL) {
-		up_read(&kmem_cache_cb_sem);
+		up_read(&kmem_cache_sem);
 		RETURN(-EINVAL);
 	}
 	atomic_inc(&kcc->kcc_ref);
-	up_read(&kmem_cache_cb_sem);
+	up_read(&kmem_cache_sem);
 
 	name = (char *)kmem_cache_name(cache);
 

@@ -428,15 +458,15 @@ __kmem_cache_destroy(kmem_cache_t *cache)
 	kfree(name);
 
 	/* Unregister generic shrinker on removal of all caches */
-	down_read(&kmem_cache_cb_sem);
-	if (list_empty(&kmem_cache_cb_list))
+	down_read(&kmem_cache_sem);
+	if (list_empty(&kmem_cache_list))
 #ifdef HAVE_SET_SHRINKER
 		remove_shrinker(kmem_cache_shrinker);
 #else
 		unregister_shrinker(&kmem_cache_shrinker);
 #endif
 
-	up_read(&kmem_cache_cb_sem);
+	up_read(&kmem_cache_sem);
 	RETURN(rc);
 }
 EXPORT_SYMBOL(__kmem_cache_destroy);

@@ -463,7 +493,7 @@ restart:
 		GOTO(restart, obj);
 	}
 
 	/* When destructor support is removed we must be careful not to
 	 * use the provided constructor which will end up being called
 	 * more often than the destructor which we only call on free. Thus
 	 * we many call the proper constructor when there is no destructor.

@@ -473,8 +503,8 @@ restart:
 	kmem_cache_generic_constructor(obj, cache, flags);
 #else
 	kmem_cache_generic_constructor(cache, obj);
-#endif
+#endif /* HAVE_KMEM_CACHE_CREATE_DTOR */
-#endif
+#endif /* HAVE_3ARG_KMEM_CACHE_CREATE_CTOR */
 
 	RETURN(obj);
 }

@@ -504,13 +534,16 @@ EXPORT_SYMBOL(__kmem_reap);
 int
 kmem_init(void)
 {
+	int i;
 	ENTRY;
 
-	init_rwsem(&kmem_cache_cb_sem);
-	INIT_LIST_HEAD(&kmem_cache_cb_list);
+	init_rwsem(&kmem_cache_sem);
+	INIT_LIST_HEAD(&kmem_cache_list);
+
+	for (i = 0; i < KMEM_CACHE_TABLE_SIZE; i++)
+		INIT_HLIST_HEAD(&kmem_cache_table[i]);
+
 #ifdef DEBUG_KMEM
-	{
-		int i;
 	atomic64_set(&kmem_alloc_used, 0);
 	atomic64_set(&vmem_alloc_used, 0);
 

@@ -527,7 +560,6 @@ kmem_init(void)
 		INIT_HLIST_HEAD(&vmem_table[i]);
 
 	atomic64_set(&kmem_cache_alloc_failed, 0);
-	}
 #endif
 	RETURN(0);
 }

@@ -416,9 +416,9 @@ __kstat_install(kstat_t *ksp)
 	list_add_tail(&ksp->ks_list, &kstat_list);
 	spin_unlock(&kstat_lock);
 
-	de_module = proc_dir_entry_find(proc_sys_spl_kstat, ksp->ks_module);
+	de_module = proc_dir_entry_find(proc_spl_kstat, ksp->ks_module);
 	if (de_module == NULL) {
-		de_module = proc_mkdir(ksp->ks_module, proc_sys_spl_kstat);
+		de_module = proc_mkdir(ksp->ks_module, proc_spl_kstat);
 		if (de_module == NULL)
 			GOTO(out, rc = -EUNATCH);
 	}

@@ -59,7 +59,7 @@ spinlock_t mutex_stats_lock;
 struct list_head mutex_stats_list;
 #endif
 
-void
+int
 __spl_mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
 {
 	int flags = KM_SLEEP;

@@ -69,8 +69,6 @@ __spl_mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
 	ASSERT(ibc == NULL);
 	ASSERT(mp->km_magic != KM_MAGIC); /* Never double init */
 
-	mp->km_magic = KM_MAGIC;
-	mp->km_owner = NULL;
 	mp->km_name = NULL;
 	mp->km_name_size = strlen(name) + 1;
 

@@ -95,12 +93,12 @@ __spl_mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
 	/* Semaphore kmem_alloc'ed to keep struct size down (<64b) */
 	mp->km_sem = kmem_alloc(sizeof(struct semaphore), flags);
 	if (mp->km_sem == NULL)
-		return;
+		return -ENOMEM;
 
 	mp->km_name = kmem_alloc(mp->km_name_size, flags);
 	if (mp->km_name == NULL) {
 		kmem_free(mp->km_sem, sizeof(struct semaphore));
-		return;
+		return -ENOMEM;
 	}
 
 	sema_init(mp->km_sem, 1);

@@ -111,7 +109,7 @@ __spl_mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
 	if (mp->km_stats == NULL) {
 		kmem_free(mp->km_name, mp->km_name_size);
 		kmem_free(mp->km_sem, sizeof(struct semaphore));
-		return;
+		return -ENOMEM;
 	}
 
 	/* XXX - This appears to be a much more contended lock than I

@@ -124,6 +122,10 @@ __spl_mutex_init(kmutex_t *mp, char *name, int type, void *ibc)
 	list_add_tail(&mp->km_list, &mutex_stats_list);
 	spin_unlock(&mutex_stats_lock);
 #endif
+	mp->km_magic = KM_MAGIC;
+	mp->km_owner = NULL;
+
+	return 0;
 }
 EXPORT_SYMBOL(__spl_mutex_init);
 

@@ -39,21 +39,21 @@ static unsigned long table_max = ~0;
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table_header *spl_header = NULL;
+#endif /* CONFIG_SYSCTL */
 
 #if defined(DEBUG_MUTEX) || defined(DEBUG_KMEM) || defined(DEBUG_KSTAT)
-static struct proc_dir_entry *proc_sys = NULL;
-static struct proc_dir_entry *proc_sys_spl = NULL;
-#endif
+static struct proc_dir_entry *proc_spl = NULL;
 #ifdef DEBUG_MUTEX
-static struct proc_dir_entry *proc_sys_spl_mutex = NULL;
-static struct proc_dir_entry *proc_sys_spl_mutex_stats = NULL;
-#endif
+static struct proc_dir_entry *proc_spl_mutex = NULL;
+static struct proc_dir_entry *proc_spl_mutex_stats = NULL;
+#endif /* DEBUG_MUTEX */
 #ifdef DEBUG_KMEM
-static struct proc_dir_entry *proc_sys_spl_kmem = NULL;
-#endif
+static struct proc_dir_entry *proc_spl_kmem = NULL;
+#endif /* DEBUG_KMEM */
 #ifdef DEBUG_KSTAT
-struct proc_dir_entry *proc_sys_spl_kstat = NULL;
-#endif
-#endif
+struct proc_dir_entry *proc_spl_kstat = NULL;
+#endif /* DEBUG_KSTAT */
+#endif /* DEBUG_MUTEX || DEBUG_KMEM || DEBUG_KSTAT */
 
 #ifdef HAVE_CTL_UNNUMBERED

@@ -877,54 +877,50 @@ proc_init(void)
 	spl_header = spl_register_sysctl_table(spl_root, 0);
 	if (spl_header == NULL)
 		RETURN(-EUNATCH);
+#endif /* CONFIG_SYSCTL */
 
 #if defined(DEBUG_MUTEX) || defined(DEBUG_KMEM) || defined(DEBUG_KSTAT)
-	proc_sys = proc_dir_entry_find(&proc_root, "sys");
-	if (proc_sys == NULL)
+	proc_spl = proc_mkdir("spl", NULL);
+	if (proc_spl == NULL)
 		GOTO(out, rc = -EUNATCH);
 
-	proc_sys_spl = proc_dir_entry_find(proc_sys, "spl");
-	if (proc_sys_spl == NULL)
-		GOTO(out, rc = -EUNATCH);
-#endif
 
 #ifdef DEBUG_MUTEX
-	proc_sys_spl_mutex = proc_dir_entry_find(proc_sys_spl, "mutex");
-	if (proc_sys_spl_mutex == NULL)
+	proc_spl_mutex = proc_mkdir("mutex", proc_spl);
+	if (proc_spl_mutex == NULL)
 		GOTO(out, rc = -EUNATCH);
 
-	proc_sys_spl_mutex_stats = create_proc_entry("stats_per", 0444,
-	                                             proc_sys_spl_mutex);
-	if (proc_sys_spl_mutex_stats == NULL)
+	proc_spl_mutex_stats = create_proc_entry("stats_per", 0444,
+	                                         proc_spl_mutex);
+	if (proc_spl_mutex_stats == NULL)
 		GOTO(out, rc = -EUNATCH);
 
-	proc_sys_spl_mutex_stats->proc_fops = &proc_mutex_operations;
+	proc_spl_mutex_stats->proc_fops = &proc_mutex_operations;
 #endif /* DEBUG_MUTEX */
 
 #ifdef DEBUG_KMEM
-	proc_sys_spl_kmem = proc_dir_entry_find(proc_sys_spl, "kmem");
-	if (proc_sys_spl_kmem == NULL)
-		GOTO(out2, rc = -EUNATCH);
+	proc_spl_kmem = proc_mkdir("kmem", proc_spl);
+	if (proc_spl_kmem == NULL)
+		GOTO(out, rc = -EUNATCH);
 #endif /* DEBUG_KMEM */
 
 #ifdef DEBUG_KSTAT
-	proc_sys_spl_kstat = proc_dir_entry_find(proc_sys_spl, "kstat");
-	if (proc_sys_spl_kstat == NULL)
-		GOTO(out2, rc = -EUNATCH);
+	proc_spl_kstat = proc_mkdir("kstat", proc_spl);
+	if (proc_spl_kstat == NULL)
+		GOTO(out, rc = -EUNATCH);
 #endif /* DEBUG_KSTAT */
 
-	RETURN(rc);
-
-#if defined(DEBUG_KMEM) || defined(DEBUG_KSTAT)
-out2:
-#endif
-#ifdef DEBUG_MUTEX
-	remove_proc_entry("stats_per", proc_sys_spl_mutex);
-#endif /* DEBUG_MUTEX */
-#if defined(DEBUG_MUTEX) || defined(DEBUG_KMEM) || defined(DEBUG_KSTAT)
 out:
-#endif
+	if (rc) {
+		remove_proc_entry("kstat", proc_spl);
+		remove_proc_entry("kmem", proc_spl);
+		remove_proc_entry("stats_per", proc_spl_mutex);
+		remove_proc_entry("mutex", proc_spl);
 #ifdef CONFIG_SYSCTL
 		spl_unregister_sysctl_table(spl_header);
 #endif /* CONFIG_SYSCTL */
+	}
+#endif /* DEBUG_MUTEX || DEBUG_KMEM || DEBUG_KSTAT */
 
 	RETURN(rc);
 }

@@ -933,12 +929,17 @@ proc_fini(void)
 {
 	ENTRY;
 
+#if defined(DEBUG_MUTEX) || defined(DEBUG_KMEM) || defined(DEBUG_KSTAT)
+	remove_proc_entry("kstat", proc_spl);
+	remove_proc_entry("kmem", proc_spl);
+	remove_proc_entry("stats_per", proc_spl_mutex);
+	remove_proc_entry("mutex", proc_spl);
+#endif /* DEBUG_MUTEX || DEBUG_KMEM || DEBUG_KSTAT */
+
 #ifdef CONFIG_SYSCTL
 	ASSERT(spl_header != NULL);
-#ifdef DEBUG_MUTEX
-	remove_proc_entry("stats_per", proc_sys_spl_mutex);
-#endif /* DEBUG_MUTEX */
 	spl_unregister_sysctl_table(spl_header);
-#endif
+#endif /* CONFIG_SYSCTL */
 
 	EXIT;
 }
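The proc_init()/proc_fini() rework above is fix 5): instead of splicing into sysctl's portion of /proc, the module now owns a private tree built with proc_mkdir() and torn down child-first with remove_proc_entry(). A minimal module sketch of the same pattern, written against the 2.6-era procfs API this commit targets; the spl_demo name and structure are illustrative, not SPL's:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/errno.h>

static struct proc_dir_entry *demo_spl;
static struct proc_dir_entry *demo_spl_kstat;

static int __init demo_init(void)
{
	/* NULL parent places the directory at the top of /proc. */
	demo_spl = proc_mkdir("spl_demo", NULL);
	if (demo_spl == NULL)
		return -EUNATCH;

	demo_spl_kstat = proc_mkdir("kstat", demo_spl);
	if (demo_spl_kstat == NULL) {
		remove_proc_entry("spl_demo", NULL);
		return -EUNATCH;
	}
	return 0;
}

static void __exit demo_exit(void)
{
	/* Children must be removed before their parent. */
	remove_proc_entry("kstat", demo_spl);
	remove_proc_entry("spl_demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");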
@@ -37,8 +37,9 @@ if [ ! -f ${spl_module} ] || [ ! -f ${splat_module} ]; then
 	die "Source tree must be built, run 'make'"
 fi
 
+spl_module_params="spl_debug_mask=-1 spl_debug_subsys=-1"
 echo "Loading ${spl_module}"
-/sbin/insmod ${spl_module} || die "Failed to load ${spl_module}"
+/sbin/insmod ${spl_module} ${spl_module_params} || die "Failed to load ${spl_module}"
 
 echo "Loading ${splat_module}"
 /sbin/insmod ${splat_module} || die "Unable to load ${splat_module}"