Remove KMC_OFFSLAB
Remove dead code to make the implementation easier to understand.

Reviewed-by: Ryan Moeller <ryan@ixsystems.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matt Ahrens <matt@delphix.com>
Closes #10650
commit 492db125dc (parent d87676a9fa)
@@ -45,7 +45,6 @@ typedef enum kmc_bit {
 	KMC_BIT_VMEM		= 6,	/* Use vmem cache */
 	KMC_BIT_KVMEM		= 7,	/* Use kvmalloc linux allocator */
 	KMC_BIT_SLAB		= 8,	/* Use Linux slab cache */
-	KMC_BIT_OFFSLAB		= 9,	/* Objects not on slab */
 	KMC_BIT_DEADLOCKED	= 14,	/* Deadlock detected */
 	KMC_BIT_GROWING		= 15,	/* Growing in progress */
 	KMC_BIT_REAPING		= 16,	/* Reaping in progress */
@@ -73,7 +72,6 @@ typedef enum kmem_cbrc {
 #define	KMC_VMEM		(1 << KMC_BIT_VMEM)
 #define	KMC_KVMEM		(1 << KMC_BIT_KVMEM)
 #define	KMC_SLAB		(1 << KMC_BIT_SLAB)
-#define	KMC_OFFSLAB		(1 << KMC_BIT_OFFSLAB)
 #define	KMC_DEADLOCKED		(1 << KMC_BIT_DEADLOCKED)
 #define	KMC_GROWING		(1 << KMC_BIT_GROWING)
 #define	KMC_REAPING		(1 << KMC_BIT_REAPING)
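Each cache flag exists in two forms: a KMC_BIT_* bit position and a KMC_* mask built from it. The following userland sketch of that relationship uses illustrative names (the _demo suffix marks them as hypothetical) and is not SPL code; in the kernel the bit positions are used with atomic helpers such as test_bit() on skc->skc_flags, while the masks serve ordinary flag tests.

/*
 * Minimal userland sketch of the KMC_BIT_* / KMC_* pairing; assumed
 * names, not SPL code.
 */
#include <stdio.h>

enum kmc_bit_demo {
	KMC_BIT_VMEM_DEMO = 6,
	KMC_BIT_KVMEM_DEMO = 7,
	KMC_BIT_SLAB_DEMO = 8,
};

#define	KMC_VMEM_DEMO	(1 << KMC_BIT_VMEM_DEMO)
#define	KMC_KVMEM_DEMO	(1 << KMC_BIT_KVMEM_DEMO)
#define	KMC_SLAB_DEMO	(1 << KMC_BIT_SLAB_DEMO)

int
main(void)
{
	unsigned long flags = KMC_KVMEM_DEMO;

	/* Mask test, as in (skc->skc_flags & KMC_KVMEM). */
	if (flags & KMC_KVMEM_DEMO)
		printf("kvmalloc-backed cache\n");

	/* Bit-position test, as the kernel's test_bit() would perform. */
	if (flags & (1UL << KMC_BIT_KVMEM_DEMO))
		printf("same bit, addressed by position %d\n",
		    KMC_BIT_KVMEM_DEMO);
	return (0);
}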
@@ -259,16 +259,6 @@ spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
 	    skc->skc_obj_align, uint32_t));
 }
 
-/*
- * Required space for each offslab object taking in to account alignment
- * restrictions and the power-of-two requirement of kv_alloc().
- */
-static inline uint32_t
-spl_offslab_size(spl_kmem_cache_t *skc)
-{
-	return (1UL << (fls64(spl_obj_size(skc)) + 1));
-}
-
 /*
  * It's important that we pack the spl_kmem_obj_t structure and the
  * actual objects in to one large address space to minimize the number
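The removed spl_offslab_size() returned a power of two at least double the object size, satisfying the power-of-two requirement of kv_alloc() with room for alignment and the trailing spl_kmem_obj_t. A userland sketch of that arithmetic, with fls64_demo() standing in for the kernel's fls64():

/*
 * Userland model of the removed spl_offslab_size() arithmetic.
 * fls64_demo() returns the 1-based index of the highest set bit,
 * mirroring the kernel's fls64().
 */
#include <stdio.h>
#include <stdint.h>

static int
fls64_demo(uint64_t x)
{
	int bit = 0;

	while (x != 0) {
		bit++;
		x >>= 1;
	}
	return (bit);
}

int
main(void)
{
	uint64_t obj_size = 100;	/* hypothetical spl_obj_size() result */
	uint64_t offslab = 1ULL << (fls64_demo(obj_size) + 1);

	/* 100 -> highest bit 7 -> 1 << 8 == 256 */
	printf("obj_size %llu -> offslab size %llu\n",
	    (unsigned long long)obj_size, (unsigned long long)offslab);
	return (0);
}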
@@ -289,25 +279,21 @@ spl_offslab_size(spl_kmem_cache_t *skc)
  * different allocation functions for small and large objects should
  * give us the best of both worlds.
  *
- * KMC_ONSLAB                       KMC_OFFSLAB
- *
- * +------------------------+       +-----------------+
- * | spl_kmem_slab_t --+-+  |       | spl_kmem_slab_t |---+-+
- * | skc_obj_size    <-+ |  |       +-----------------+   | |
- * | spl_kmem_obj_t      |  |                             | |
- * | skc_obj_size    <---+  |       +-----------------+   | |
- * | spl_kmem_obj_t      |  |       | skc_obj_size    | <-+ |
- * | ...                 v  |       | spl_kmem_obj_t  |     |
- * +------------------------+       +-----------------+     v
+ * +------------------------+
+ * | spl_kmem_slab_t --+-+  |
+ * | skc_obj_size    <-+ |  |
+ * | spl_kmem_obj_t      |  |
+ * | skc_obj_size    <---+  |
+ * | spl_kmem_obj_t      |  |
+ * | ...                 v  |
+ * +------------------------+
  */
 static spl_kmem_slab_t *
 spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
 {
 	spl_kmem_slab_t *sks;
-	spl_kmem_obj_t *sko;
-	void *base, *obj;
-	uint32_t obj_size, offslab_size = 0;
-	int i, rc = 0;
+	void *base;
+	uint32_t obj_size;
 
 	base = kv_alloc(skc, skc->skc_slab_size, flags);
 	if (base == NULL)
@@ -323,22 +309,11 @@ spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
 	sks->sks_ref = 0;
 	obj_size = spl_obj_size(skc);
 
-	if (skc->skc_flags & KMC_OFFSLAB)
-		offslab_size = spl_offslab_size(skc);
-
-	for (i = 0; i < sks->sks_objs; i++) {
-		if (skc->skc_flags & KMC_OFFSLAB) {
-			obj = kv_alloc(skc, offslab_size, flags);
-			if (!obj) {
-				rc = -ENOMEM;
-				goto out;
-			}
-		} else {
-			obj = base + spl_sks_size(skc) + (i * obj_size);
-		}
-
+	for (int i = 0; i < sks->sks_objs; i++) {
+		void *obj = base + spl_sks_size(skc) + (i * obj_size);
+
 		ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
-		sko = spl_sko_from_obj(skc, obj);
+		spl_kmem_obj_t *sko = spl_sko_from_obj(skc, obj);
 		sko->sko_addr = obj;
 		sko->sko_magic = SKO_MAGIC;
 		sko->sko_slab = sks;
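With KMC_OFFSLAB gone, every object lives on the slab itself at a fixed offset, as the diagram in the hunk above shows. A userland model of the address arithmetic in the simplified loop follows; the sizes are assumed stand-ins, not SPL values:

/*
 * Userland model of the on-slab layout: one slab header at the base,
 * followed by sks_objs objects of obj_size bytes, where obj_size
 * covers the aligned object plus its trailing spl_kmem_obj_t.
 */
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	size_t sks_size = 64;	/* stand-in for spl_sks_size(skc) */
	size_t obj_size = 512;	/* stand-in for spl_obj_size(skc) */
	int sks_objs = 8;	/* objects per slab */
	char *base = malloc(sks_size + sks_objs * obj_size);

	if (base == NULL)
		return (1);
	for (int i = 0; i < sks_objs; i++) {
		/* Mirrors: obj = base + spl_sks_size(skc) + (i * obj_size) */
		char *obj = base + sks_size + (i * obj_size);
		printf("obj %d at offset %zu\n", i, (size_t)(obj - base));
	}
	free(base);
	return (0);
}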
@@ -346,19 +321,6 @@ spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
 		list_add_tail(&sko->sko_list, &sks->sks_free_list);
 	}
 
-out:
-	if (rc) {
-		spl_kmem_obj_t *n = NULL;
-		if (skc->skc_flags & KMC_OFFSLAB)
-			list_for_each_entry_safe(sko,
-			    n, &sks->sks_free_list, sko_list) {
-				kv_free(skc, sko->sko_addr, offslab_size);
-			}
-
-		kv_free(skc, base, skc->skc_slab_size);
-		sks = NULL;
-	}
-
 	return (sks);
 }
 
@@ -402,7 +364,6 @@ spl_slab_reclaim(spl_kmem_cache_t *skc)
 	spl_kmem_obj_t *sko = NULL, *n = NULL;
 	LIST_HEAD(sks_list);
 	LIST_HEAD(sko_list);
-	uint32_t size = 0;
 
 	/*
 	 * Empty slabs and objects must be moved to a private list so they
@@ -422,21 +383,15 @@ spl_slab_reclaim(spl_kmem_cache_t *skc)
 	spin_unlock(&skc->skc_lock);
 
 	/*
-	 * The following two loops ensure all the object destructors are
-	 * run, any offslab objects are freed, and the slabs themselves
-	 * are freed.  This is all done outside the skc->skc_lock since
-	 * this allows the destructor to sleep, and allows us to perform
-	 * a conditional reschedule when a freeing a large number of
-	 * objects and slabs back to the system.
+	 * The following two loops ensure all the object destructors are run,
+	 * and the slabs themselves are freed.  This is all done outside the
+	 * skc->skc_lock since this allows the destructor to sleep, and
+	 * allows us to perform a conditional reschedule when a freeing a
+	 * large number of objects and slabs back to the system.
 	 */
-	if (skc->skc_flags & KMC_OFFSLAB)
-		size = spl_offslab_size(skc);
-
 	list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
 		ASSERT(sko->sko_magic == SKO_MAGIC);
-
-		if (skc->skc_flags & KMC_OFFSLAB)
-			kv_free(skc, sko->sko_addr, size);
 	}
 
 	list_for_each_entry_safe(sks, m, &sks_list, sks_list) {
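The comment describes a common pattern: detach the reclaimable entries onto a private list while holding the lock, then run destructors that may sleep after dropping it. A minimal pthread sketch of that pattern, with illustrative names rather than SPL code:

/*
 * Userland sketch: splice entries to a private list under the lock,
 * then free them (destructors may sleep) with the lock dropped.
 */
#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *free_list;

static void
reclaim(void)
{
	pthread_mutex_lock(&cache_lock);
	struct node *batch = free_list;	/* move entries to a private list */
	free_list = NULL;
	pthread_mutex_unlock(&cache_lock);

	/* Destructors may sleep here; the lock is no longer held. */
	while (batch != NULL) {
		struct node *next = batch->next;
		free(batch);
		batch = next;
	}
}

int
main(void)
{
	for (int i = 0; i < 4; i++) {
		struct node *n = malloc(sizeof (*n));
		if (n == NULL)
			return (1);
		n->next = free_list;
		free_list = n;
	}
	reclaim();
	return (0);
}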
@@ -603,37 +558,28 @@ spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
 {
 	uint32_t sks_size, obj_size, max_size, tgt_size, tgt_objs;
 
-	if (skc->skc_flags & KMC_OFFSLAB) {
-		tgt_objs = spl_kmem_cache_obj_per_slab;
-		tgt_size = P2ROUNDUP(sizeof (spl_kmem_slab_t), PAGE_SIZE);
-
-		if ((skc->skc_flags & KMC_KMEM) &&
-		    (spl_obj_size(skc) > (SPL_MAX_ORDER_NR_PAGES * PAGE_SIZE)))
-			return (-ENOSPC);
-	} else {
-		sks_size = spl_sks_size(skc);
-		obj_size = spl_obj_size(skc);
-		max_size = (spl_kmem_cache_max_size * 1024 * 1024);
-		tgt_size = (spl_kmem_cache_obj_per_slab * obj_size + sks_size);
-
-		/*
-		 * KMC_KMEM slabs are allocated by __get_free_pages() which
-		 * rounds up to the nearest order.  Knowing this the size
-		 * should be rounded up to the next power of two with a hard
-		 * maximum defined by the maximum allowed allocation order.
-		 */
-		if (skc->skc_flags & KMC_KMEM) {
-			max_size = SPL_MAX_ORDER_NR_PAGES * PAGE_SIZE;
-			tgt_size = MIN(max_size,
-			    PAGE_SIZE * (1 << MAX(get_order(tgt_size) - 1, 1)));
-		}
-
-		if (tgt_size <= max_size) {
-			tgt_objs = (tgt_size - sks_size) / obj_size;
-		} else {
-			tgt_objs = (max_size - sks_size) / obj_size;
-			tgt_size = (tgt_objs * obj_size) + sks_size;
-		}
-	}
+	sks_size = spl_sks_size(skc);
+	obj_size = spl_obj_size(skc);
+	max_size = (spl_kmem_cache_max_size * 1024 * 1024);
+	tgt_size = (spl_kmem_cache_obj_per_slab * obj_size + sks_size);
+
+	/*
+	 * KMC_KMEM slabs are allocated by __get_free_pages() which
+	 * rounds up to the nearest order.  Knowing this the size
+	 * should be rounded up to the next power of two with a hard
+	 * maximum defined by the maximum allowed allocation order.
+	 */
+	if (skc->skc_flags & KMC_KMEM) {
+		max_size = SPL_MAX_ORDER_NR_PAGES * PAGE_SIZE;
+		tgt_size = MIN(max_size,
+		    PAGE_SIZE * (1 << MAX(get_order(tgt_size) - 1, 1)));
+	}
+
+	if (tgt_size <= max_size) {
+		tgt_objs = (tgt_size - sks_size) / obj_size;
+	} else {
+		tgt_objs = (max_size - sks_size) / obj_size;
+		tgt_size = (tgt_objs * obj_size) + sks_size;
+	}
 
 	if (tgt_objs == 0)
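A userland walk-through of the now-unconditional sizing logic for a KMC_KMEM cache; get_order_demo() stands in for the kernel's get_order(), and the constants are assumed example values rather than real SPL parameters:

/*
 * Userland model of spl_slab_size() for a KMC_KMEM cache: compute the
 * target slab size, round it to a power-of-two page order, then derive
 * how many objects actually fit.
 */
#include <stdio.h>
#include <stdint.h>

#define	PAGE_SIZE_DEMO		4096u
#define	MAX_ORDER_PAGES_DEMO	32u	/* stand-in for SPL_MAX_ORDER_NR_PAGES */
#define	MIN_DEMO(a, b)		((a) < (b) ? (a) : (b))
#define	MAX_DEMO(a, b)		((a) > (b) ? (a) : (b))

/* Smallest order such that (PAGE_SIZE << order) >= size. */
static int
get_order_demo(uint32_t size)
{
	int order = 0;

	while ((PAGE_SIZE_DEMO << order) < size)
		order++;
	return (order);
}

int
main(void)
{
	uint32_t sks_size = 64;		/* stand-in for spl_sks_size(skc) */
	uint32_t obj_size = 1024;	/* stand-in for spl_obj_size(skc) */
	uint32_t obj_per_slab = 8;	/* spl_kmem_cache_obj_per_slab */
	uint32_t max_size = MAX_ORDER_PAGES_DEMO * PAGE_SIZE_DEMO;
	uint32_t tgt_size = obj_per_slab * obj_size + sks_size;	/* 8256 */

	/* Round to a power-of-two page order, as for KMC_KMEM slabs. */
	tgt_size = MIN_DEMO(max_size, PAGE_SIZE_DEMO *
	    (1u << MAX_DEMO(get_order_demo(tgt_size) - 1, 1)));

	uint32_t tgt_objs = (tgt_size - sks_size) / obj_size;

	/* Prints: slab 8192 bytes -> 7 objects */
	printf("slab %u bytes -> %u objects\n",
	    (unsigned)tgt_size, (unsigned)tgt_objs);
	return (0);
}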
@@ -772,9 +718,8 @@ spl_magazine_destroy(spl_kmem_cache_t *skc)
  * flags
  *	KMC_KMEM	Force SPL kmem backed cache
  *	KMC_VMEM	Force SPL vmem backed cache
- *	KMC_KVMEM	Force kvmem backed cache
+ *	KMC_KVMEM	Force kvmem backed SPL cache
  *	KMC_SLAB	Force Linux slab backed cache
- *	KMC_OFFSLAB	Locate objects off the slab
 *	KMC_NOTOUCH	Disable cache object aging (unsupported)
 *	KMC_NODEBUG	Disable debugging (unsupported)
 *	KMC_NOHASH	Disable hashing (unsupported)
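For reference, a hypothetical caller passing one of the remaining flags to spl_kmem_cache_create(); the nine-argument prototype, the header path, and the my_obj_t type are assumptions for illustration, not taken from this commit:

/*
 * Hypothetical kernel-context sketch of creating an SPL cache with the
 * flags documented above; prototype and types are assumed.
 */
#include <sys/kmem_cache.h>

typedef struct my_obj { uint64_t payload[8]; } my_obj_t;

static spl_kmem_cache_t *my_cache;

static int
my_cache_init(void)
{
	my_cache = spl_kmem_cache_create("my_cache", sizeof (my_obj_t),
	    0,			/* default alignment */
	    NULL, NULL,		/* no constructor/destructor */
	    NULL, NULL, NULL,	/* reclaim (unused), priv, vmp */
	    KMC_KVMEM);		/* force kvmalloc backing */
	return (my_cache == NULL ? -1 : 0);
}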