* : modules/sys/kmem-slab.c : Re-implemented the slab to no
longer be based on the Linux slab but to be its own complete
implementation.  The new slab behaves much more like the
Solaris slab than the Linux slab.



git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@132 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c
behlendo 2008-06-13 23:41:06 +00:00
parent cfe5749941
commit 2fb9b26a85
7 changed files with 1023 additions and 717 deletions


@@ -1,3 +1,10 @@
+2008-06-13  Brian Behlendorf <behlendorf1@llnl.gov>
+
+	* : modules/sys/kmem-slab.c : Re-implemented the slab to no
+	longer be based on the Linux slab but to be its own complete
+	implementation.  The new slab behaves much more like the
+	Solaris slab than the Linux slab.
+
 2008-06-04  Brian Behlendorf <behlendorf1@llnl.gov>

 	* : Tag spl-0.3.2


@@ -308,11 +308,11 @@ kmem_alloc_tryhard(size_t size, size_t *alloc_size, int kmflags)
 /*
  * Slab allocation interfaces
  */
-#undef  KMC_NOTOUCH			/* No linux analog */
+#undef  KMC_NOTOUCH			/* XXX: Unsupported */
 #define KMC_NODEBUG			0x00000000 /* Default behavior */
-#define KMC_NOMAGAZINE			/* No linux analog */
-#define KMC_NOHASH			/* No linux analog */
-#define KMC_QCACHE			/* No linux analog */
+#define KMC_NOMAGAZINE			/* XXX: Unsupported */
+#define KMC_NOHASH			/* XXX: Unsupported */
+#define KMC_QCACHE			/* XXX: Unsupported */

 #define KMC_REAP_CHUNK			256
 #define KMC_DEFAULT_SEEKS		DEFAULT_SEEKS
@@ -342,7 +342,7 @@ static __inline__ size_t kmem_maxavail(void) {
 #error "kmem_maxavail() not implemented"
 }

-static __inline__ uint64_t kmem_cache_stat(kmem_cache_t *cache) {
+static __inline__ uint64_t kmem_cache_stat(spl_kmem_cache_t *cache) {
 #error "kmem_cache_stat() not implemented"
 }
 #endif /* DEBUG_KMEM_UNIMPLEMENTED */
@@ -357,34 +357,101 @@ kmem_debugging(void)
 	return 0;
 }

-typedef int (*kmem_constructor_t)(void *, void *, int);
-typedef void (*kmem_destructor_t)(void *, void *);
-typedef void (*kmem_reclaim_t)(void *);
-
 extern int kmem_set_warning(int flag);

-extern kmem_cache_t *
-__kmem_cache_create(char *name, size_t size, size_t align,
-	kmem_constructor_t constructor,
-	kmem_destructor_t destructor,
-	kmem_reclaim_t reclaim,
+#define SKO_MAGIC			0x20202020
+#define SKS_MAGIC			0x22222222
+#define SKC_MAGIC			0x2c2c2c2c
+
+#define SPL_KMEM_CACHE_HASH_BITS	12	/* 4k, sized for 1000's of objs */
+#define SPL_KMEM_CACHE_HASH_ELTS	(1 << SPL_KMEM_CACHE_HASH_BITS)
+#define SPL_KMEM_CACHE_HASH_SIZE	(sizeof(struct hlist_head) * \
+					 SPL_KMEM_CACHE_HASH_ELTS)
+#define SPL_KMEM_CACHE_DELAY		5
+#define SPL_KMEM_CACHE_OBJ_PER_SLAB	32
+
+typedef int (*spl_kmem_ctor_t)(void *, void *, int);
+typedef void (*spl_kmem_dtor_t)(void *, void *);
+typedef void (*spl_kmem_reclaim_t)(void *);
+
+typedef struct spl_kmem_obj {
+	uint32_t		sko_magic;	/* Sanity magic */
+	uint32_t		sko_flags;	/* Per object flags */
+	void			*sko_addr;	/* Buffer address */
+	struct spl_kmem_slab	*sko_slab;	/* Owned by slab */
+	struct list_head	sko_list;	/* Free object list linkage */
+	struct hlist_node	sko_hlist;	/* Used object hash linkage */
+} spl_kmem_obj_t;
+
+typedef struct spl_kmem_slab {
+	uint32_t		sks_magic;	/* Sanity magic */
+	uint32_t		sks_objs;	/* Objects per slab */
+	struct spl_kmem_cache	*sks_cache;	/* Owned by cache */
+	struct list_head	sks_list;	/* Slab list linkage */
+	struct list_head	sks_free_list;	/* Free object list */
+	unsigned long		sks_age;	/* Last modify jiffie */
+	atomic_t		sks_ref;	/* Ref count used objects */
+} spl_kmem_slab_t;
+
+typedef struct spl_kmem_cache {
+	uint32_t		skc_magic;	/* Sanity magic */
+	uint32_t		skc_name_size;	/* Name length */
+	char			*skc_name;	/* Name string */
+	spl_kmem_ctor_t		skc_ctor;	/* Constructor */
+	spl_kmem_dtor_t		skc_dtor;	/* Destructor */
+	spl_kmem_reclaim_t	skc_reclaim;	/* Reclaimator */
+	void			*skc_private;	/* Private data */
+	void			*skc_vmp;	/* Unused */
+	uint32_t		skc_flags;	/* Flags */
+	uint32_t		skc_obj_size;	/* Object size */
+	uint32_t		skc_chunk_size;	/* sizeof(*obj) + alignment */
+	uint32_t		skc_slab_size;	/* slab size */
+	uint32_t		skc_max_chunks;	/* max chunks per slab */
+	uint32_t		skc_delay;	/* slab reclaim interval */
+	uint32_t		skc_hash_bits;	/* Hash table bits */
+	uint32_t		skc_hash_size;	/* Hash table size */
+	uint32_t		skc_hash_elts;	/* Hash table elements */
+	struct hlist_head	*skc_hash;	/* Hash table address */
+	struct list_head	skc_list;	/* List of caches linkage */
+	struct list_head	skc_complete_list;/* Completely alloc'ed */
+	struct list_head	skc_partial_list; /* Partially alloc'ed */
+	struct rw_semaphore	skc_sem;	/* Cache semaphore */
+	uint64_t		skc_slab_fail;	/* Slab alloc failures */
+	uint64_t		skc_slab_create;/* Slab creates */
+	uint64_t		skc_slab_destroy;/* Slab destroys */
+	uint64_t		skc_slab_total;	/* Slab total */
+	uint64_t		skc_slab_alloc;	/* Slab alloc */
+	uint64_t		skc_slab_max;	/* Slab max */
+	uint64_t		skc_obj_total;	/* Obj total */
+	uint64_t		skc_obj_alloc;	/* Obj alloc */
+	uint64_t		skc_obj_max;	/* Obj max */
+	uint64_t		skc_hash_depth;	/* Hash depth */
+	uint64_t		skc_hash_max;	/* Hash depth max */
+} spl_kmem_cache_t;
+
+extern spl_kmem_cache_t *
+spl_kmem_cache_create(char *name, size_t size, size_t align,
+	spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, spl_kmem_reclaim_t reclaim,
 	void *priv, void *vmp, int flags);
-extern int __kmem_cache_destroy(kmem_cache_t *cache);
-extern void *__kmem_cache_alloc(kmem_cache_t *cache, gfp_t flags);
-extern void __kmem_cache_free(kmem_cache_t *cache, void *obj);
-extern void __kmem_reap(void);
+extern void spl_kmem_cache_destroy(spl_kmem_cache_t *skc);
+extern void *spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags);
+extern void spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj);
+extern void spl_kmem_cache_reap_now(spl_kmem_cache_t *skc);
+extern void spl_kmem_reap(void);

-int kmem_init(void);
-void kmem_fini(void);
+int spl_kmem_init(void);
+void spl_kmem_fini(void);

 #define kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags) \
-        __kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags)
-#define kmem_cache_destroy(cache)	__kmem_cache_destroy(cache)
-#define kmem_cache_alloc(cache, flags)	__kmem_cache_alloc(cache, flags)
-#define kmem_cache_free(cache, obj)	__kmem_cache_free(cache, obj)
-#define kmem_cache_reap_now(cache)	kmem_cache_shrink(cache)
-#define kmem_reap()			__kmem_reap()
+        spl_kmem_cache_create(name,size,align,ctor,dtor,rclm,priv,vmp,flags)
+#define kmem_cache_destroy(skc)		spl_kmem_cache_destroy(skc)
+#define kmem_cache_alloc(skc, flags)	spl_kmem_cache_alloc(skc, flags)
+#define kmem_cache_free(skc, obj)	spl_kmem_cache_free(skc, obj)
+#define kmem_cache_reap_now(skc)	spl_kmem_cache_reap_now(skc)
+#define kmem_reap()			spl_kmem_reap()

 #ifdef __cplusplus
 }
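For context (not part of the commit): a minimal consumer sketch of the interface declared above. The my_obj_t type, the my_ctor/my_dtor callbacks, and the cache name are hypothetical; the kmem_cache_* compatibility macros above are what map these Solaris-style calls onto the new spl_kmem_cache_* entry points.

	/* Hypothetical cached object type */
	typedef struct my_obj {
		int mo_val;
	} my_obj_t;

	/* Matches the spl_kmem_ctor_t signature: (object, private, flags) */
	static int
	my_ctor(void *obj, void *priv, int flags)
	{
		((my_obj_t *)obj)->mo_val = 0;
		return 0;
	}

	/* Matches the spl_kmem_dtor_t signature: (object, private) */
	static void
	my_dtor(void *obj, void *priv)
	{
	}

	static void
	my_cache_example(void)
	{
		kmem_cache_t *cache;	/* Aliased to spl_kmem_cache_t */
		my_obj_t *obj;

		/* (name, size, align, ctor, dtor, reclaim, priv, vmp, flags) */
		cache = kmem_cache_create("my_cache", sizeof(my_obj_t), 0,
					  my_ctor, my_dtor, NULL, NULL, NULL, 0);
		obj = kmem_cache_alloc(cache, KM_SLEEP);
		/* ... use obj ... */
		kmem_cache_free(cache, obj);
		kmem_cache_destroy(cache);
	}

As the SPLAT tests later in this commit verify, constructors and destructors run as slabs are created and destroyed rather than strictly once per alloc/free pair, so only after kmem_cache_destroy() is a destructor guaranteed to have run for every object.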


@@ -16,6 +16,7 @@ typedef unsigned long uintptr_t;
 #ifndef HAVE_KMEM_CACHE_T
 typedef struct kmem_cache kmem_cache_t;
 #endif
+#define kmem_cache_t		spl_kmem_cache_t

 typedef enum { B_FALSE=0, B_TRUE=1 } boolean_t;
 typedef unsigned long intptr_t;


@@ -130,7 +130,7 @@ static int __init spl_init(void)
 	if ((rc = debug_init()))
 		return rc;

-	if ((rc = kmem_init()))
+	if ((rc = spl_kmem_init()))
 		GOTO(out , rc);

 	if ((rc = spl_mutex_init()))
@@ -159,7 +159,7 @@ out4:
 out3:
 	spl_mutex_fini();
 out2:
-	kmem_fini();
+	spl_kmem_fini();
 out:
 	debug_fini();
@@ -176,7 +176,8 @@ static void spl_fini(void)
 	kstat_fini();
 	proc_fini();
 	vn_fini();
-	kmem_fini();
+	spl_mutex_fini();
+	spl_kmem_fini();
 	debug_fini();
 }

File diff suppressed because it is too large


@@ -633,7 +633,7 @@ void
 vn_fini(void)
 {
 	file_t *fp, *next_fp;
-	int rc, leaked = 0;
+	int leaked = 0;
 	ENTRY;

 	spin_lock(&vn_file_lock);
@@ -644,19 +644,14 @@ vn_fini(void)
 		leaked++;
 	}

-	rc = kmem_cache_destroy(vn_file_cache);
-	if (rc)
-		CWARN("Warning leaked vn_file_cache objects, %d\n", rc);
-
+	kmem_cache_destroy(vn_file_cache);
 	vn_file_cache = NULL;
 	spin_unlock(&vn_file_lock);

 	if (leaked > 0)
 		CWARN("Warning %d files leaked\n", leaked);

-	rc = kmem_cache_destroy(vn_cache);
-	if (rc)
-		CWARN("Warning leaked vn_cache objects, %d\n", rc);
+	kmem_cache_destroy(vn_cache);

 	EXIT;
 	return;


@@ -39,16 +39,24 @@
 #define SPLAT_KMEM_TEST2_DESC		"Memory allocation test (kmem_zalloc)"

 #define SPLAT_KMEM_TEST3_ID		0x0103
-#define SPLAT_KMEM_TEST3_NAME		"slab_alloc"
-#define SPLAT_KMEM_TEST3_DESC		"Slab constructor/destructor test"
+#define SPLAT_KMEM_TEST3_NAME		"vmem_alloc"
+#define SPLAT_KMEM_TEST3_DESC		"Memory allocation test (vmem_alloc)"

 #define SPLAT_KMEM_TEST4_ID		0x0104
-#define SPLAT_KMEM_TEST4_NAME		"slab_reap"
-#define SPLAT_KMEM_TEST4_DESC		"Slab reaping test"
+#define SPLAT_KMEM_TEST4_NAME		"vmem_zalloc"
+#define SPLAT_KMEM_TEST4_DESC		"Memory allocation test (vmem_zalloc)"

 #define SPLAT_KMEM_TEST5_ID		0x0105
-#define SPLAT_KMEM_TEST5_NAME		"vmem_alloc"
-#define SPLAT_KMEM_TEST5_DESC		"Memory allocation test (vmem_alloc)"
+#define SPLAT_KMEM_TEST5_NAME		"kmem_cache1"
+#define SPLAT_KMEM_TEST5_DESC		"Slab ctor/dtor test (small)"
+
+#define SPLAT_KMEM_TEST6_ID		0x0106
+#define SPLAT_KMEM_TEST6_NAME		"kmem_cache2"
+#define SPLAT_KMEM_TEST6_DESC		"Slab ctor/dtor test (large)"
+
+#define SPLAT_KMEM_TEST7_ID		0x0107
+#define SPLAT_KMEM_TEST7_NAME		"kmem_reap"
+#define SPLAT_KMEM_TEST7_DESC		"Slab reaping test"

 #define SPLAT_KMEM_ALLOC_COUNT		10
 #define SPLAT_VMEM_ALLOC_COUNT		10
@@ -142,228 +150,8 @@ splat_kmem_test2(struct file *file, void *arg)
 	return rc;
 }

-#define SPLAT_KMEM_TEST_MAGIC		0x004488CCUL
-#define SPLAT_KMEM_CACHE_NAME		"kmem_test"
-#define SPLAT_KMEM_CACHE_SIZE		256
-#define SPLAT_KMEM_OBJ_COUNT		128
-#define SPLAT_KMEM_OBJ_RECLAIM		64
-
-typedef struct kmem_cache_data {
-	char kcd_buf[SPLAT_KMEM_CACHE_SIZE];
-	unsigned long kcd_magic;
-	int kcd_flag;
-} kmem_cache_data_t;
-
-typedef struct kmem_cache_priv {
-	unsigned long kcp_magic;
-	struct file *kcp_file;
-	kmem_cache_t *kcp_cache;
-	kmem_cache_data_t *kcp_kcd[SPLAT_KMEM_OBJ_COUNT];
-	int kcp_count;
-	int kcp_rc;
-} kmem_cache_priv_t;
-
-static int
-splat_kmem_test34_constructor(void *ptr, void *priv, int flags)
-{
-	kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;
-	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
-
-	if (kcd) {
-		memset(kcd->kcd_buf, 0xaa, SPLAT_KMEM_CACHE_SIZE);
-		kcd->kcd_flag = 1;
-
-		if (kcp) {
-			kcd->kcd_magic = kcp->kcp_magic;
-			kcp->kcp_count++;
-		}
-	}
-
-	return 0;
-}
-
-static void
-splat_kmem_test34_destructor(void *ptr, void *priv)
-{
-	kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;
-	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
-
-	if (kcd) {
-		memset(kcd->kcd_buf, 0xbb, SPLAT_KMEM_CACHE_SIZE);
-		kcd->kcd_flag = 0;
-
-		if (kcp)
-			kcp->kcp_count--;
-	}
-
-	return;
-}
-
 static int
 splat_kmem_test3(struct file *file, void *arg)
-{
-	kmem_cache_t *cache = NULL;
-	kmem_cache_data_t *kcd = NULL;
-	kmem_cache_priv_t kcp;
-	int rc = 0, max;
-
-	kcp.kcp_magic = SPLAT_KMEM_TEST_MAGIC;
-	kcp.kcp_file = file;
-	kcp.kcp_count = 0;
-	kcp.kcp_rc = 0;
-
-	cache = kmem_cache_create(SPLAT_KMEM_CACHE_NAME, sizeof(*kcd), 0,
-	                          splat_kmem_test34_constructor,
-	                          splat_kmem_test34_destructor,
-	                          NULL, &kcp, NULL, 0);
-	if (!cache) {
-		splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
-		             "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
-		return -ENOMEM;
-	}
-
-	kcd = kmem_cache_alloc(cache, 0);
-	if (!kcd) {
-		splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
-		             "Unable to allocate from '%s'\n",
-		             SPLAT_KMEM_CACHE_NAME);
-		rc = -EINVAL;
-		goto out_free;
-	}
-
-	if (!kcd->kcd_flag) {
-		splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
-		             "Failed to run constructor for '%s'\n",
-		             SPLAT_KMEM_CACHE_NAME);
-		rc = -EINVAL;
-		goto out_free;
-	}
-
-	if (kcd->kcd_magic != kcp.kcp_magic) {
-		splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
-		             "Failed to pass private data to constructor "
-		             "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
-		rc = -EINVAL;
-		goto out_free;
-	}
-
-	max = kcp.kcp_count;
-
-	/* Destructors run lazily so it's hard to check correctness here.
-	 * We assume if it doesn't crash the free worked properly */
-	kmem_cache_free(cache, kcd);
-
-	/* Destroy the entire cache which will force destructors to
-	 * run and we can verify one was called for every object */
-	kmem_cache_destroy(cache);
-	if (kcp.kcp_count) {
-		splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
-		             "Failed to run destructor on all slab objects "
-		             "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
-		rc = -EINVAL;
-	}
-
-	splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
-	             "%d allocated/destroyed objects for '%s'\n",
-	             max, SPLAT_KMEM_CACHE_NAME);
-
-	return rc;
-
-out_free:
-	if (kcd)
-		kmem_cache_free(cache, kcd);
-
-	kmem_cache_destroy(cache);
-	return rc;
-}
-
-static void
-splat_kmem_test4_reclaim(void *priv)
-{
-	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
-	int i;
-
-	splat_vprint(kcp->kcp_file, SPLAT_KMEM_TEST4_NAME,
-	             "Reaping %d objects from '%s'\n",
-	             SPLAT_KMEM_OBJ_RECLAIM, SPLAT_KMEM_CACHE_NAME);
-	for (i = 0; i < SPLAT_KMEM_OBJ_RECLAIM; i++) {
-		if (kcp->kcp_kcd[i]) {
-			kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
-			kcp->kcp_kcd[i] = NULL;
-		}
-	}
-
-	return;
-}
-
-static int
-splat_kmem_test4(struct file *file, void *arg)
-{
-	kmem_cache_t *cache;
-	kmem_cache_priv_t kcp;
-	int i, rc = 0, max, reclaim_percent, target_percent;
-
-	kcp.kcp_magic = SPLAT_KMEM_TEST_MAGIC;
-	kcp.kcp_file = file;
-	kcp.kcp_count = 0;
-	kcp.kcp_rc = 0;
-
-	cache = kmem_cache_create(SPLAT_KMEM_CACHE_NAME,
-	                          sizeof(kmem_cache_data_t), 0,
-	                          splat_kmem_test34_constructor,
-	                          splat_kmem_test34_destructor,
-	                          splat_kmem_test4_reclaim, &kcp, NULL, 0);
-	if (!cache) {
-		splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
-		             "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
-		return -ENOMEM;
-	}
-
-	kcp.kcp_cache = cache;
-
-	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++) {
-		/* All allocations need not succeed */
-		kcp.kcp_kcd[i] = kmem_cache_alloc(cache, 0);
-		if (!kcp.kcp_kcd[i]) {
-			splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
-			             "Unable to allocate from '%s'\n",
-			             SPLAT_KMEM_CACHE_NAME);
-		}
-	}
-
-	max = kcp.kcp_count;
-	ASSERT(max > 0);
-
-	/* Force shrinker to run */
-	kmem_reap();
-
-	/* Re-free reclaimed objects, this ensures the destructors are run */
-	kmem_cache_reap_now(cache);
-
-	reclaim_percent = ((kcp.kcp_count * 100) / max);
-	target_percent = (((SPLAT_KMEM_OBJ_COUNT - SPLAT_KMEM_OBJ_RECLAIM) * 100) /
-	                  SPLAT_KMEM_OBJ_COUNT);
-
-	splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
-	             "%d%% (%d/%d) of previous size, target of "
-	             "%d%%-%d%% for '%s'\n", reclaim_percent, kcp.kcp_count,
-	             max, target_percent - 10, target_percent + 10,
-	             SPLAT_KMEM_CACHE_NAME);
-	if ((reclaim_percent < target_percent - 10) ||
-	    (reclaim_percent > target_percent + 10))
-		rc = -EINVAL;
-
-	/* Cleanup our mess */
-	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++)
-		if (kcp.kcp_kcd[i])
-			kmem_cache_free(cache, kcp.kcp_kcd[i]);
-
-	kmem_cache_destroy(cache);
-
-	return rc;
-}
-
-static int
-splat_kmem_test5(struct file *file, void *arg)
 {
 	void *ptr[SPLAT_VMEM_ALLOC_COUNT];
 	int size = PAGE_SIZE;
@@ -382,7 +170,7 @@ splat_kmem_test5(struct file *file, void *arg)
 		if (ptr[i])
 			vmem_free(ptr[i], size);

-	splat_vprint(file, SPLAT_KMEM_TEST5_NAME,
+	splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
 	             "%d byte allocations, %d/%d successful\n",
 	             size, count, SPLAT_VMEM_ALLOC_COUNT);
 	if (count != SPLAT_VMEM_ALLOC_COUNT)
@@ -394,6 +182,312 @@ splat_kmem_test5(struct file *file, void *arg)
 	return rc;
 }

+static int
+splat_kmem_test4(struct file *file, void *arg)
+{
+	void *ptr[SPLAT_VMEM_ALLOC_COUNT];
+	int size = PAGE_SIZE;
+	int i, j, count, rc = 0;
+
+	while ((!rc) && (size <= (PAGE_SIZE * 1024))) {
+		count = 0;
+
+		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
+			ptr[i] = vmem_zalloc(size, KM_SLEEP);
+			if (ptr[i])
+				count++;
+		}
+
+		/* Ensure buffer has been zero filled */
+		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
+			for (j = 0; j < size; j++) {
+				if (((char *)ptr[i])[j] != '\0') {
+					splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
+					             "%d-byte allocation was "
+					             "not zeroed\n", size);
+					rc = -EFAULT;
+				}
+			}
+		}
+
+		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
+			if (ptr[i])
+				vmem_free(ptr[i], size);
+
+		splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
+		             "%d byte allocations, %d/%d successful\n",
+		             size, count, SPLAT_VMEM_ALLOC_COUNT);
+		if (count != SPLAT_VMEM_ALLOC_COUNT)
+			rc = -ENOMEM;
+
+		size *= 2;
+	}
+
+	return rc;
+}
+
+#define SPLAT_KMEM_TEST_MAGIC		0x004488CCUL
+#define SPLAT_KMEM_CACHE_NAME		"kmem_test"
+#define SPLAT_KMEM_OBJ_COUNT		128
+#define SPLAT_KMEM_OBJ_RECLAIM		16
+
+typedef struct kmem_cache_data {
+	unsigned long kcd_magic;
+	int kcd_flag;
+	char kcd_buf[0];
+} kmem_cache_data_t;
+
+typedef struct kmem_cache_priv {
+	unsigned long kcp_magic;
+	struct file *kcp_file;
+	kmem_cache_t *kcp_cache;
+	kmem_cache_data_t *kcp_kcd[SPLAT_KMEM_OBJ_COUNT];
+	int kcp_size;
+	int kcp_count;
+	int kcp_rc;
+} kmem_cache_priv_t;
+
+static int
+splat_kmem_cache_test_constructor(void *ptr, void *priv, int flags)
+{
+	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
+	kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;
+
+	if (kcd) {
+		if (kcp) {
+			kcd->kcd_magic = kcp->kcp_magic;
+			kcp->kcp_count++;
+		}
+
+		memset(kcd->kcd_buf, 0xaa, kcp->kcp_size - (sizeof *kcd));
+		kcd->kcd_flag = 1;
+	}
+
+	return 0;
+}
+
+static void
+splat_kmem_cache_test_destructor(void *ptr, void *priv)
+{
+	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
+	kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;
+
+	if (kcd) {
+		if (kcp) {
+			kcd->kcd_magic = 0;
+			kcp->kcp_count--;
+		}
+
+		memset(kcd->kcd_buf, 0xbb, kcp->kcp_size - (sizeof *kcd));
+		kcd->kcd_flag = 0;
+	}
+
+	return;
+}
+
+static int
+splat_kmem_cache_size_test(struct file *file, void *arg,
+			   char *name, int size, int flags)
+{
+	kmem_cache_t *cache = NULL;
+	kmem_cache_data_t *kcd = NULL;
+	kmem_cache_priv_t kcp;
+	int rc = 0, max;
+
+	kcp.kcp_magic = SPLAT_KMEM_TEST_MAGIC;
+	kcp.kcp_file = file;
+	kcp.kcp_size = size;
+	kcp.kcp_count = 0;
+	kcp.kcp_rc = 0;
+
+	cache = kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp.kcp_size, 0,
+	                          splat_kmem_cache_test_constructor,
+	                          splat_kmem_cache_test_destructor,
+	                          NULL, &kcp, NULL, flags);
+	if (!cache) {
+		splat_vprint(file, name,
+		             "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
+		return -ENOMEM;
+	}
+
+	kcd = kmem_cache_alloc(cache, KM_SLEEP);
+	if (!kcd) {
+		splat_vprint(file, name,
+		             "Unable to allocate from '%s'\n",
+		             SPLAT_KMEM_CACHE_NAME);
+		rc = -EINVAL;
+		goto out_free;
+	}
+
+	if (!kcd->kcd_flag) {
+		splat_vprint(file, name,
+		             "Failed to run constructor for '%s'\n",
+		             SPLAT_KMEM_CACHE_NAME);
+		rc = -EINVAL;
+		goto out_free;
+	}
+
+	if (kcd->kcd_magic != kcp.kcp_magic) {
+		splat_vprint(file, name,
+		             "Failed to pass private data to constructor "
+		             "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
+		rc = -EINVAL;
+		goto out_free;
+	}
+
+	max = kcp.kcp_count;
+	kmem_cache_free(cache, kcd);
+
+	/* Destroy the entire cache which will force destructors to
+	 * run and we can verify one was called for every object */
+	kmem_cache_destroy(cache);
+	if (kcp.kcp_count) {
+		splat_vprint(file, name,
+		             "Failed to run destructor on all slab objects "
+		             "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
+		rc = -EINVAL;
+	}
+
+	splat_vprint(file, name,
+	             "Successfully ran ctors/dtors for %d elements in '%s'\n",
+	             max, SPLAT_KMEM_CACHE_NAME);
+
+	return rc;
+
+out_free:
+	if (kcd)
+		kmem_cache_free(cache, kcd);
+
+	kmem_cache_destroy(cache);
+	return rc;
+}
+
+static int
+splat_kmem_test5(struct file *file, void *arg)
+{
+	return splat_kmem_cache_size_test(file, arg, SPLAT_KMEM_TEST5_NAME,
+	                                  sizeof(kmem_cache_data_t) * 1, 0);
+}
+
+static int
+splat_kmem_test6(struct file *file, void *arg)
+{
+	return splat_kmem_cache_size_test(file, arg, SPLAT_KMEM_TEST6_NAME,
+	                                  sizeof(kmem_cache_data_t) * 1024, 0);
+}
+
+static void
+splat_kmem_cache_test_reclaim(void *priv)
+{
+	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
+	int i, count;
+
+	count = min(SPLAT_KMEM_OBJ_RECLAIM, kcp->kcp_count);
+	splat_vprint(kcp->kcp_file, SPLAT_KMEM_TEST7_NAME,
+	             "Reaping %d objects from '%s'\n", count,
+	             SPLAT_KMEM_CACHE_NAME);
+	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++) {
+		if (kcp->kcp_kcd[i]) {
+			kmem_cache_free(kcp->kcp_cache, kcp->kcp_kcd[i]);
+			kcp->kcp_kcd[i] = NULL;
+
+			if (--count == 0)
+				break;
+		}
+	}
+
+	return;
+}
+
+static int
+splat_kmem_test7(struct file *file, void *arg)
+{
+	kmem_cache_t *cache;
+	kmem_cache_priv_t kcp;
+	int i, rc = 0;
+
+	kcp.kcp_magic = SPLAT_KMEM_TEST_MAGIC;
+	kcp.kcp_file = file;
+	kcp.kcp_size = 256;
+	kcp.kcp_count = 0;
+	kcp.kcp_rc = 0;
+
+	cache = kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp.kcp_size, 0,
+	                          splat_kmem_cache_test_constructor,
+	                          splat_kmem_cache_test_destructor,
+	                          splat_kmem_cache_test_reclaim,
+	                          &kcp, NULL, 0);
+	if (!cache) {
+		splat_vprint(file, SPLAT_KMEM_TEST7_NAME,
+		             "Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
+		return -ENOMEM;
+	}
+
+	kcp.kcp_cache = cache;
+
+	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++) {
+		/* All allocations need not succeed */
+		kcp.kcp_kcd[i] = kmem_cache_alloc(cache, KM_SLEEP);
+		if (!kcp.kcp_kcd[i]) {
+			splat_vprint(file, SPLAT_KMEM_TEST7_NAME,
+			             "Unable to allocate from '%s'\n",
+			             SPLAT_KMEM_CACHE_NAME);
+		}
+	}
+
+	ASSERT(kcp.kcp_count > 0);
+
+	/* Request the slab cache free any objects it can.  For a few reasons
+	 * this may not immediately result in more free memory even if objects
+	 * are freed.  First off, due to fragmentation we may not be able to
+	 * reclaim any slabs.  Secondly, even if we do fully clear some slabs
+	 * we will not want to immediately reclaim all of them because we may
+	 * contend with cache allocs and thrash.  What we want to see is the
+	 * slab size decrease more gradually as it becomes clear they will
+	 * not be needed.  This should be achievable in less than a minute;
+	 * if it takes longer than this something has gone wrong.
+	 */
+	for (i = 0; i < 60; i++) {
+		kmem_cache_reap_now(cache);
+		splat_vprint(file, SPLAT_KMEM_TEST7_NAME,
+		             "%s cache objects %d, slabs %u/%u objs %u/%u\n",
+		             SPLAT_KMEM_CACHE_NAME, kcp.kcp_count,
+		             (unsigned)cache->skc_slab_alloc,
+		             (unsigned)cache->skc_slab_total,
+		             (unsigned)cache->skc_obj_alloc,
+		             (unsigned)cache->skc_obj_total);
+
+		if (cache->skc_obj_total == 0)
+			break;
+
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(HZ);
+	}
+
+	if (cache->skc_obj_total == 0) {
+		splat_vprint(file, SPLAT_KMEM_TEST7_NAME,
+		             "Successfully created %d objects "
+		             "in cache %s and reclaimed them\n",
+		             SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
+	} else {
+		splat_vprint(file, SPLAT_KMEM_TEST7_NAME,
+		             "Failed to reclaim %u/%d objects from cache %s\n",
+		             (unsigned)cache->skc_obj_total, SPLAT_KMEM_OBJ_COUNT,
+		             SPLAT_KMEM_CACHE_NAME);
+		rc = -ENOMEM;
+	}
+
+	/* Cleanup our mess (for failure case of time expiring) */
+	for (i = 0; i < SPLAT_KMEM_OBJ_COUNT; i++)
+		if (kcp.kcp_kcd[i])
+			kmem_cache_free(cache, kcp.kcp_kcd[i]);
+
+	kmem_cache_destroy(cache);
+
+	return rc;
+}
+
 splat_subsystem_t *
 splat_kmem_init(void)
 {
@@ -421,6 +515,10 @@ splat_kmem_init(void)
 	                SPLAT_KMEM_TEST4_ID, splat_kmem_test4);
 	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST5_NAME, SPLAT_KMEM_TEST5_DESC,
 	                SPLAT_KMEM_TEST5_ID, splat_kmem_test5);
+	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST6_NAME, SPLAT_KMEM_TEST6_DESC,
+	                SPLAT_KMEM_TEST6_ID, splat_kmem_test6);
+	SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST7_NAME, SPLAT_KMEM_TEST7_DESC,
+	                SPLAT_KMEM_TEST7_ID, splat_kmem_test7);

 	return sub;
 }
@@ -429,6 +527,8 @@ void
 splat_kmem_fini(splat_subsystem_t *sub)
 {
 	ASSERT(sub);
+	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST7_ID);
+	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST6_ID);
 	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST5_ID);
 	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST4_ID);
 	SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST3_ID);