Fix for memory corruption caused by overrunning the magazine
when repopulating it.  I also fixed a few more subtle races in
that part of the code which had been tripping me up.  Finally,
I fixed a small race in kmem_test8.



git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@137 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c
behlendo 2008-06-26 19:49:42 +00:00
parent 4afaaefa05
commit e9d7a2bef5
3 changed files with 94 additions and 62 deletions
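
Editor's note: the corruption described in the commit message comes from the
refill path pushing up to skm_refill objects into a per-CPU magazine without
checking how many free slots it actually has, writing past the end of the
skm_objs[] array.  The following is a minimal user-space sketch of the clamp
that the diff below introduces; the names (magazine, refill, avail) mirror the
SPL fields but the code is illustrative only, not SPL code.

/*
 * Minimal user-space sketch of the overrun fixed below -- illustrative
 * only, not SPL code.  A per-CPU "magazine" is a fixed array of object
 * pointers; a refill must be clamped to the free slots that remain or
 * it writes past the end of the array.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define MAG_SIZE   8          /* like skm_size: magazine capacity      */
#define MAG_REFILL 4          /* like skm_refill: preferred batch size */
#define MIN(a, b)  ((a) < (b) ? (a) : (b))

struct magazine {
	size_t avail;             /* like skm_avail */
	void  *objs[MAG_SIZE];    /* like skm_objs  */
};

/* Returns the number of objects actually added (cf. the new RETURN(rc)). */
static size_t refill(struct magazine *m, void **pool, size_t pool_cnt)
{
	/* The fix: never request more than the free slots available now. */
	size_t want = MIN(MAG_REFILL, MAG_SIZE - m->avail);
	size_t added = 0;

	while (added < want && added < pool_cnt) {
		assert(m->avail < MAG_SIZE);   /* can no longer overrun */
		m->objs[m->avail++] = pool[added++];
	}
	return added;
}

int main(void)
{
	static int objects[16];
	void *pool[16];
	struct magazine m = { .avail = 6 };    /* already 6 of 8 slots used */
	size_t added, i;

	for (i = 0; i < 16; i++)
		pool[i] = &objects[i];

	/* Unclamped, a refill of 4 into 2 free slots would scribble past
	 * objs[7]; clamped, only 2 objects are added. */
	added = refill(&m, pool, 16);
	printf("added %zu objects, avail now %zu/%d\n", added, m.avail, MAG_SIZE);
	return 0;
}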


@@ -49,7 +49,7 @@ extern "C" {
 #define KM_SLEEP GFP_KERNEL
 #define KM_NOSLEEP GFP_ATOMIC
 #undef KM_PANIC /* No linux analog */
-#define KM_PUSHPAGE (GFP_KERNEL | __GFP_HIGH)
+#define KM_PUSHPAGE (KM_SLEEP | __GFP_HIGH)
 #define KM_VMFLAGS GFP_LEVEL_MASK
 #define KM_FLAGS __GFP_BITS_MASK


@@ -319,9 +319,9 @@ spl_magazine_size(spl_kmem_cache_t *skc)
 	else if (skc->skc_obj_size > (PAGE_SIZE / 4))
 		size = 32;
 	else if (skc->skc_obj_size > (PAGE_SIZE / 16))
-		size = 64;
+		size = 48;
 	else
-		size = 128;
+		size = 64;
 	RETURN(size);
 }
@@ -492,6 +492,8 @@ spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
 	spl_kmem_slab_t *sks, *m;
 	ENTRY;
+	ASSERT(skc->skc_magic == SKC_MAGIC);
 	down_write(&spl_kmem_cache_sem);
 	list_del_init(&skc->skc_list);
 	up_write(&spl_kmem_cache_sem);
@@ -535,6 +537,7 @@ spl_hash_obj(spl_kmem_cache_t *skc, void *obj)
 	unsigned long key = spl_hash_ptr(obj, skc->skc_hash_bits);
 	int i = 0;
+	ASSERT(skc->skc_magic == SKC_MAGIC);
 	ASSERT(spin_is_locked(&skc->skc_lock));
 	hlist_for_each_entry(sko, node, &skc->skc_hash[key], sko_hlist) {
@@ -557,6 +560,8 @@ spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
 	spl_kmem_obj_t *sko;
 	unsigned long key;
+	ASSERT(skc->skc_magic == SKC_MAGIC);
+	ASSERT(sks->sks_magic == SKS_MAGIC);
 	ASSERT(spin_is_locked(&skc->skc_lock));
 	sko = list_entry((&sks->sks_free_list)->next,spl_kmem_obj_t,sko_list);
@@ -600,8 +605,10 @@ spl_cache_grow(spl_kmem_cache_t *skc, int flags)
 	spl_kmem_obj_t *sko;
 	ENTRY;
+	ASSERT(skc->skc_magic == SKC_MAGIC);
 	if (flags & __GFP_WAIT) {
-		flags |= __GFP_NOFAIL;
+//		flags |= __GFP_NOFAIL; /* XXX: Solaris assumes this */
 		might_sleep();
 		local_irq_enable();
 	}
@@ -639,10 +646,14 @@ static int
 spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
 {
 	spl_kmem_slab_t *sks;
-	int refill = skm->skm_refill;
+	int rc = 0, refill;
 	ENTRY;
+	ASSERT(skc->skc_magic == SKC_MAGIC);
+	ASSERT(skm->skm_magic == SKM_MAGIC);
 	/* XXX: Check for refill bouncing by age perhaps */
+	refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail);
 	spin_lock(&skc->skc_lock);
 	while (refill > 0) {
@@ -651,11 +662,16 @@ spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
 			spin_unlock(&skc->skc_lock);
 			sks = spl_cache_grow(skc, flags);
 			if (!sks)
-				GOTO(out, refill);
+				GOTO(out, rc);
 			/* Rescheduled to different CPU skm is not local */
 			if (skm != skc->skc_mag[smp_processor_id()])
-				GOTO(out, refill);
+				GOTO(out, rc);
+			/* Potentially rescheduled to the same CPU but
+			 * allocations may have occured from this CPU while
+			 * we were sleeping so recalculate max refill. */
+			refill = MIN(refill, skm->skm_size - skm->skm_avail);
 			spin_lock(&skc->skc_lock);
 			continue;
@@ -669,10 +685,12 @@ spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
 		ASSERT(!list_empty(&sks->sks_free_list));
 		/* Consume as many objects as needed to refill the requested
-		 * cache. We must be careful to lock here because our local
-		 * magazine may not be local anymore due to spl_cache_grow. */
-		while ((sks->sks_ref < sks->sks_objs) && (refill-- > 0))
+		 * cache. We must also be careful not to overfill it. */
+		while (sks->sks_ref < sks->sks_objs && refill-- > 0 && ++rc) {
+			ASSERT(skm->skm_avail < skm->skm_size);
+			ASSERT(rc < skm->skm_size);
 			skm->skm_objs[skm->skm_avail++]=spl_cache_obj(skc,sks);
+		}
 		/* Move slab to skc_complete_list when full */
 		if (sks->sks_ref == sks->sks_objs) {
@@ -684,7 +702,7 @@ spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
 	spin_unlock(&skc->skc_lock);
 out:
 	/* Returns the number of entries added to cache */
-	RETURN(skm->skm_refill - refill);
+	RETURN(rc);
 }
 static void
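
Editor's note: because spl_cache_grow() can sleep with the cache lock dropped,
the magazine that was local at the start of the refill may have gained entries
by the time the loop resumes, which is why the hunk above recomputes the
remaining budget on every pass.  A small user-space model of that recompute
step follows; names are illustrative and grow_and_sleep() merely stands in for
"other activity touched the magazine while we slept".

/*
 * Sketch of the second half of the refill fix, user-space and
 * illustrative only, not SPL code.
 */
#include <stdio.h>

#define MAG_SIZE   8
#define MAG_REFILL 4
#define MIN(a, b)  ((a) < (b) ? (a) : (b))

struct magazine { int avail; void *objs[MAG_SIZE]; };

/* Pretend two frees landed in this magazine while we were sleeping. */
static void grow_and_sleep(struct magazine *m) { m->avail += 2; }

static int refill(struct magazine *m)
{
	int budget = MIN(MAG_REFILL, MAG_SIZE - m->avail);
	int added = 0;

	while (budget > 0) {
		grow_and_sleep(m);                     /* lock dropped here */
		/* Recompute: fewer free slots may remain than before. */
		budget = MIN(budget, MAG_SIZE - m->avail);
		while (budget-- > 0 && m->avail < MAG_SIZE) {
			m->objs[m->avail++] = NULL;    /* placeholder object */
			added++;
		}
	}
	return added;
}

int main(void)
{
	struct magazine m = { .avail = 3 };
	int added = refill(&m);

	printf("added %d objects, avail now %d/%d\n", added, m.avail, MAG_SIZE);
	return 0;
}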
@@ -694,6 +712,7 @@ spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
 	spl_kmem_obj_t *sko = NULL;
 	ENTRY;
+	ASSERT(skc->skc_magic == SKC_MAGIC);
 	ASSERT(spin_is_locked(&skc->skc_lock));
 	sko = spl_hash_obj(skc, obj);
@@ -738,12 +757,14 @@ spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
 	int i, count = MIN(flush, skm->skm_avail);
 	ENTRY;
+	ASSERT(skc->skc_magic == SKC_MAGIC);
+	ASSERT(skm->skm_magic == SKM_MAGIC);
 	spin_lock(&skc->skc_lock);
 	for (i = 0; i < count; i++)
 		spl_cache_shrink(skc, skm->skm_objs[i]);
-	__spl_slab_reclaim(skc);
+//	__spl_slab_reclaim(skc);
 	skm->skm_avail -= count;
 	memmove(skm->skm_objs, &(skm->skm_objs[count]),
 		sizeof(void *) * skm->skm_avail);
@@ -759,9 +780,11 @@ spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
 	spl_kmem_magazine_t *skm;
 	unsigned long irq_flags;
 	void *obj = NULL;
+	int id;
 	ENTRY;
-	ASSERT(flags & KM_SLEEP);
+	ASSERT(skc->skc_magic == SKC_MAGIC);
+	ASSERT(flags & KM_SLEEP); /* XXX: KM_NOSLEEP not yet supported */
 	local_irq_save(irq_flags);
 restart:
@@ -769,7 +792,12 @@ restart:
 	 * in the restart case we must be careful to reaquire
 	 * the local magazine since this may have changed
 	 * when we need to grow the cache. */
+	id = smp_processor_id();
+	ASSERTF(id < 4, "cache=%p smp_processor_id=%d\n", skc, id);
 	skm = skc->skc_mag[smp_processor_id()];
+	ASSERTF(skm->skm_magic == SKM_MAGIC, "%x != %x: %s/%p/%p %x/%x/%x\n",
+		skm->skm_magic, SKM_MAGIC, skc->skc_name, skc, skm,
+		skm->skm_size, skm->skm_refill, skm->skm_avail);
 	if (likely(skm->skm_avail)) {
 		/* Object available in CPU cache, use it */
@@ -798,6 +826,7 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
 	unsigned long flags;
 	ENTRY;
+	ASSERT(skc->skc_magic == SKC_MAGIC);
 	local_irq_save(flags);
 	/* Safe to update per-cpu structure without lock, but
@@ -805,10 +834,12 @@ spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
 	 * it is entirely possible to allocate an object from one
 	 * CPU cache and return it to another. */
 	skm = skc->skc_mag[smp_processor_id()];
+	ASSERT(skm->skm_magic == SKM_MAGIC);
 	/* Per-CPU cache full, flush it to make space */
 	if (unlikely(skm->skm_avail >= skm->skm_size))
 		(void)spl_cache_flush(skc, skm, skm->skm_refill);
+	(void)spl_cache_flush(skc, skm, 1);
 	/* Available space in cache, use it */
 	skm->skm_objs[skm->skm_avail++] = obj;
@@ -850,7 +881,8 @@ spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
 	spl_kmem_magazine_t *skm;
 	int i;
 	ENTRY;
-	ASSERT(skc && skc->skc_magic == SKC_MAGIC);
+	ASSERT(skc->skc_magic == SKC_MAGIC);
 	if (skc->skc_reclaim)
 		skc->skc_reclaim(skc->skc_private);
@@ -965,7 +997,6 @@ spl_sprintf_addr(kmem_debug_t *kd, char *str, int len, int min)
 			flag = 0;
 			break;
 		}
-		}
 		if (!flag) {


@@ -553,9 +553,10 @@ out:
 	kcp->kcp_threads--;
 	if (!kcp->kcp_rc)
 		kcp->kcp_rc = rc;
-	spin_unlock(&kcp->kcp_lock);
 	wake_up(&kcp->kcp_waitq);
+	spin_unlock(&kcp->kcp_lock);
 	thread_exit();
 }
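
Editor's note: the kmem_test8 change above moves wake_up() ahead of
spin_unlock() so that a waiter, which may tear down the test context as soon
as it sees the thread count reach zero, cannot race with a wake-up delivered
after the lock is dropped.  Below is a rough pthreads analogue of the same
ordering; it is illustrative only, not SPLAT code, and builds with cc -pthread.

/*
 * Worker updates shared state and signals while still holding the lock
 * (mirroring wake_up() before spin_unlock()), so the waiter cannot observe
 * threads == 0 and destroy the primitives before the wake-up lands.
 */
#include <pthread.h>
#include <stdio.h>

struct ctx {
	pthread_mutex_t lock;
	pthread_cond_t  done;
	int             threads;
};

static void *worker(void *arg)
{
	struct ctx *c = arg;

	pthread_mutex_lock(&c->lock);
	c->threads--;
	pthread_cond_signal(&c->done);     /* signal before unlocking */
	pthread_mutex_unlock(&c->lock);
	return NULL;
}

int main(void)
{
	struct ctx c = { PTHREAD_MUTEX_INITIALIZER,
			 PTHREAD_COND_INITIALIZER, 1 };
	pthread_t t;

	pthread_create(&t, NULL, worker, &c);

	pthread_mutex_lock(&c.lock);
	while (c.threads > 0)
		pthread_cond_wait(&c.done, &c.lock);
	pthread_mutex_unlock(&c.lock);
	pthread_join(t, NULL);

	/* Only now is it safe to destroy what the worker was using. */
	pthread_cond_destroy(&c.done);
	pthread_mutex_destroy(&c.lock);
	printf("all workers finished\n");
	return 0;
}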