* modules/spl/spl-kmem.c : Make sure to disable interrupts when necessary
to avoid deadlocks. We were seeing a deadlock when an interrupt arrived
while kmem_cache_generic_constructor() held kmem_cache_cb_lock and the
handler ended up calling kmem_cache_generic_destructor(), which tried to
take the same lock.

git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@74 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c
parent d61e12af5a
commit 55152ebbb4
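The bug being fixed is the classic self-deadlock on a spinlock that is taken
in both process and interrupt context: process context acquires
kmem_cache_cb_lock with plain spin_lock(), an interrupt arrives on the same
CPU, and the handler ends up needing the same lock, spinning forever on a
lock its own CPU already holds. The sketch below illustrates the pattern and
the fix; the lock primitives are the real kernel API, but demo_lock and the
two functions are illustrative names, not from the SPL source.

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(demo_lock);      /* stands in for kmem_cache_cb_lock */

/* Interrupt context: may preempt the process-context path on this CPU. */
static irqreturn_t demo_irq_handler(int irq, void *dev_id)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        /* ... touch the shared callback list ... */
        spin_unlock_irqrestore(&demo_lock, flags);

        return IRQ_HANDLED;
}

/* Process context: the buggy version used spin_lock(&demo_lock) here,
 * leaving a window in which the handler above could fire on this CPU
 * and spin forever on a lock this CPU already holds.  Disabling local
 * interrupts for the critical section closes that window. */
static void demo_process_path(void)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        /* ... touch the shared callback list ... */
        spin_unlock_irqrestore(&demo_lock, flags);
}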
ChangeLog
@@ -1,3 +1,11 @@
+2008-04-16 Herb Wartens <wartens2@llnl.gov>
+
+        * modules/spl/spl-kmem.c : Make sure to disable interrupts
+        when necessary to avoid deadlocks. We were seeing the deadlock
+        when calling kmem_cache_generic_constructor() and then an interrupt
+        forced us to end up calling kmem_cache_generic_destructor()
+        which caused our deadlock.
+
 2008-02-26 Brian Behlendorf <behlendorf1@llnl.gov>
 
         : Initial commit of the solaris porting layer (spl). Included
modules/spl/spl-kmem.c
@@ -77,6 +77,7 @@ kmem_cache_add_cache_cb(kmem_cache_t *cache,
                         void *priv, void *vmp)
 {
         kmem_cache_cb_t *kcc;
+        unsigned long flags;
 
         kcc = (kmem_cache_cb_t *)kmalloc(sizeof(*kcc), GFP_KERNEL);
         if (kcc) {
@@ -86,9 +87,9 @@ kmem_cache_add_cache_cb(kmem_cache_t *cache,
                 kcc->kcc_reclaim = reclaim;
                 kcc->kcc_private = priv;
                 kcc->kcc_vmp = vmp;
-                spin_lock(&kmem_cache_cb_lock);
+                spin_lock_irqsave(&kmem_cache_cb_lock, flags);
                 list_add(&kcc->kcc_list, &kmem_cache_cb_list);
-                spin_unlock(&kmem_cache_cb_lock);
+                spin_unlock_irqrestore(&kmem_cache_cb_lock, flags);
         }
 
         return kcc;
@@ -97,9 +98,11 @@ kmem_cache_add_cache_cb(kmem_cache_t *cache,
 static void
 kmem_cache_remove_cache_cb(kmem_cache_cb_t *kcc)
 {
-        spin_lock(&kmem_cache_cb_lock);
+        unsigned long flags;
+
+        spin_lock_irqsave(&kmem_cache_cb_lock, flags);
         list_del(&kcc->kcc_list);
-        spin_unlock(&kmem_cache_cb_lock);
+        spin_unlock_irqrestore(&kmem_cache_cb_lock, flags);
 
         if (kcc)
                 kfree(kcc);
@@ -110,9 +113,10 @@ kmem_cache_generic_constructor(void *ptr, kmem_cache_t *cache, unsigned long fla
 {
         kmem_cache_cb_t *kcc;
         kmem_constructor_t constructor;
+        unsigned long irqflags;
         void *private;
 
-        spin_lock(&kmem_cache_cb_lock);
+        spin_lock_irqsave(&kmem_cache_cb_lock, irqflags);
 
         /* Callback list must be in sync with linux slab caches */
         kcc = kmem_cache_find_cache_cb(cache);
@@ -120,7 +124,7 @@ kmem_cache_generic_constructor(void *ptr, kmem_cache_t *cache, unsigned long fla
                 constructor = kcc->kcc_constructor;
                 private = kcc->kcc_private;
 
-        spin_unlock(&kmem_cache_cb_lock);
+        spin_unlock_irqrestore(&kmem_cache_cb_lock, irqflags);
 
         if (constructor)
                 constructor(ptr, private, (int)flags);
@@ -133,9 +137,10 @@ kmem_cache_generic_destructor(void *ptr, kmem_cache_t *cache, unsigned long flag
 {
         kmem_cache_cb_t *kcc;
         kmem_destructor_t destructor;
+        unsigned long irqflags;
         void *private;
 
-        spin_lock(&kmem_cache_cb_lock);
+        spin_lock_irqsave(&kmem_cache_cb_lock, irqflags);
 
         /* Callback list must be in sync with linux slab caches */
         kcc = kmem_cache_find_cache_cb(cache);
@@ -143,7 +148,7 @@ kmem_cache_generic_destructor(void *ptr, kmem_cache_t *cache, unsigned long flag
                 destructor = kcc->kcc_destructor;
                 private = kcc->kcc_private;
 
-        spin_unlock(&kmem_cache_cb_lock);
+        spin_unlock_irqrestore(&kmem_cache_cb_lock, irqflags);
 
         /* Solaris destructor takes no flags, silently eat them */
         if (destructor)
@@ -155,6 +160,7 @@ static int
 kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
 {
         kmem_cache_cb_t *kcc;
+        unsigned long flags;
         int total = 0;
 
         /* Under linux a shrinker is not tightly coupled with a slab
@@ -164,7 +170,7 @@ kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
          * function in the shim layer for all slab caches. And we always
          * attempt to shrink all caches when this generic shrinker is called.
          */
-        spin_lock(&kmem_cache_cb_lock);
+        spin_lock_irqsave(&kmem_cache_cb_lock, flags);
 
         list_for_each_entry(kcc, &kmem_cache_cb_list, kcc_list) {
                 /* Under linux the desired number and gfp type of objects
@@ -185,7 +191,7 @@ kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
          * was registered with the generic shrinker. This should fake out
          * the linux VM when it attempts to shrink caches.
          */
-        spin_unlock(&kmem_cache_cb_lock);
+        spin_unlock_irqrestore(&kmem_cache_cb_lock, flags);
         return total;
 }
 
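The three hunks above touch kmem_cache_generic_shrinker() only in fragments.
Stitched together, the post-patch function has roughly the shape below; the
loop body falls between the hunks and is not shown in this diff, so the
reclaim-callback wiring is an assumption pieced together from the
kcc_reclaim/kcc_private fields registered earlier, not the actual source.

static int
kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
{
        kmem_cache_cb_t *kcc;
        unsigned long flags;
        int total = 0;

        /* One shim-level shrinker serves every registered slab cache,
         * so walk the whole callback list under the (now IRQ-safe) lock. */
        spin_lock_irqsave(&kmem_cache_cb_lock, flags);
        list_for_each_entry(kcc, &kmem_cache_cb_list, kcc_list) {
                /* Assumed body: invoke the Solaris-style reclaim
                 * callback registered for this cache, if any. */
                if (kcc->kcc_reclaim)
                        kcc->kcc_reclaim(kcc->kcc_private);
                total += 1;
        }
        spin_unlock_irqrestore(&kmem_cache_cb_lock, flags);

        /* Report a combined count to "fake out" the linux VM. */
        return total;
}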
@@ -257,11 +263,12 @@ __kmem_cache_destroy(kmem_cache_t *cache)
 {
         kmem_cache_cb_t *kcc;
         char *name;
+        unsigned long flags;
         int rc;
 
-        spin_lock(&kmem_cache_cb_lock);
+        spin_lock_irqsave(&kmem_cache_cb_lock, flags);
         kcc = kmem_cache_find_cache_cb(cache);
-        spin_unlock(&kmem_cache_cb_lock);
+        spin_unlock_irqrestore(&kmem_cache_cb_lock, flags);
         if (kcc == NULL)
                 return -EINVAL;
 
@@ -271,11 +278,11 @@ __kmem_cache_destroy(kmem_cache_t *cache)
         kfree(name);
 
         /* Unregister generic shrinker on removal of all caches */
-        spin_lock(&kmem_cache_cb_lock);
+        spin_lock_irqsave(&kmem_cache_cb_lock, flags);
         if (list_empty(&kmem_cache_cb_list))
                 remove_shrinker(kmem_cache_shrinker);
 
-        spin_unlock(&kmem_cache_cb_lock);
+        spin_unlock_irqrestore(&kmem_cache_cb_lock, flags);
         return rc;
 }
 EXPORT_SYMBOL(__kmem_cache_destroy);
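Every spl-kmem.c hunk in this commit is the same mechanical transformation,
applied at each acquisition of kmem_cache_cb_lock. In summary (the wrapper
functions are illustrative; the lock and primitives are the real ones):

/* Before: an interrupt taken while the lock is held can re-enter the
 * lock from its handler and deadlock the CPU. */
static void locking_before(void)
{
        spin_lock(&kmem_cache_cb_lock);
        /* ... critical section ... */
        spin_unlock(&kmem_cache_cb_lock);
}

/* After: local interrupts are disabled for the critical section, and
 * the caller's previous interrupt state is restored on unlock. */
static void locking_after(void)
{
        unsigned long flags;

        spin_lock_irqsave(&kmem_cache_cb_lock, flags);
        /* ... critical section ... */
        spin_unlock_irqrestore(&kmem_cache_cb_lock, flags);
}

spin_lock_irqsave() is used rather than spin_lock_irq() because it saves the
caller's interrupt state in flags and restores it on unlock, so these helpers
stay correct even when entered with interrupts already disabled.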