Add ddt, ddt_entry, and l2arc_hdr caches
Back the allocations for ddt tables+entries and l2arc headers with
kmem caches. This will reduce the cost of allocating these commonly
used structures and allow for greater visibility of them through the
/proc/spl/kmem/slab interface.

Signed-off-by: John Layman <jlayman@sagecloud.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #1893
This commit is contained in:
parent 4dad7d91e2
commit ecf3d9b8e6
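Before the hunks, a minimal sketch (commentary, not part of the commit) of the slab-cache lifecycle the patch adopts, using the nine-argument Solaris-style kmem_cache_create() call shape that appears in the diff below. The example_t type, the "example_cache" name, and the example_* functions are hypothetical stand-ins; only the call pattern mirrors the patch. The caches are created once at subsystem init, objects are allocated and freed from them on the hot paths, and the caches are destroyed at teardown, which is the shape of the ddt_init()/ddt_fini() additions and the arc.c buf_fini() hunk below.

#include <sys/types.h>
#include <sys/kmem.h>	/* SPL kernel headers assumed */

/* Hypothetical type and cache; only the call pattern mirrors the patch. */
typedef struct example {
	uint64_t	ex_key;
	uint64_t	ex_value;
} example_t;

static kmem_cache_t *example_cache;

void
example_init(void)
{
	/* name, size, align, constructor, destructor, reclaim, private, vmem, flags */
	example_cache = kmem_cache_create("example_cache",
	    sizeof (example_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
}

void
example_fini(void)
{
	kmem_cache_destroy(example_cache);
}

static example_t *
example_alloc(void)
{
	example_t *ex;

	/* No constructor is registered, so zero the object explicitly. */
	ex = kmem_cache_alloc(example_cache, KM_PUSHPAGE);
	bzero(ex, sizeof (example_t));
	return (ex);
}

static void
example_free(example_t *ex)
{
	kmem_cache_free(example_cache, ex);
}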
@@ -216,6 +216,8 @@ extern void ddt_decompress(uchar_t *src, void *dst, size_t s_len, size_t d_len);
 extern ddt_t *ddt_select(spa_t *spa, const blkptr_t *bp);
 extern void ddt_enter(ddt_t *ddt);
 extern void ddt_exit(ddt_t *ddt);
+extern void ddt_init(void);
+extern void ddt_fini(void);
 extern ddt_entry_t *ddt_lookup(ddt_t *ddt, const blkptr_t *bp, boolean_t add);
 extern void ddt_prefetch(spa_t *spa, const blkptr_t *bp);
 extern void ddt_remove(ddt_t *ddt, ddt_entry_t *dde);

@@ -899,6 +899,7 @@ buf_hash_remove(arc_buf_hdr_t *buf)
  */
 static kmem_cache_t *hdr_cache;
 static kmem_cache_t *buf_cache;
+static kmem_cache_t *l2arc_hdr_cache;
 
 static void
 buf_fini(void)
@@ -920,6 +921,7 @@ buf_fini(void)
 		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
 	kmem_cache_destroy(hdr_cache);
 	kmem_cache_destroy(buf_cache);
+	kmem_cache_destroy(l2arc_hdr_cache);
 }
 
 /*
@@ -1020,6 +1022,8 @@ retry:
 	    0, hdr_cons, hdr_dest, NULL, NULL, NULL, 0);
 	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
 	    0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
+	l2arc_hdr_cache = kmem_cache_create("l2arc_buf_hdr_t", L2HDR_SIZE,
+	    0, NULL, NULL, NULL, NULL, NULL, 0);
 
 	for (i = 0; i < 256; i++)
 		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
@@ -1678,7 +1682,7 @@ arc_hdr_destroy(arc_buf_hdr_t *hdr)
 			list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
 			ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
 			ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
-			kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
+			kmem_cache_free(l2arc_hdr_cache, l2hdr);
 			arc_space_return(L2HDR_SIZE, ARC_SPACE_L2HDRS);
 			if (hdr->b_state == arc_l2c_only)
 				l2arc_hdr_stat_remove();
@@ -3684,7 +3688,7 @@ arc_release(arc_buf_t *buf, void *tag)
 	if (l2hdr) {
 		ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
 		list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
-		kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
+		kmem_cache_free(l2arc_hdr_cache, l2hdr);
 		arc_space_return(L2HDR_SIZE, ARC_SPACE_L2HDRS);
 		ARCSTAT_INCR(arcstat_l2_size, -buf_size);
 		mutex_exit(&l2arc_buflist_mtx);
@@ -4572,7 +4576,7 @@ l2arc_write_done(zio_t *zio)
 			list_remove(buflist, ab);
 			ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize);
 			ab->b_l2hdr = NULL;
-			kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
+			kmem_cache_free(l2arc_hdr_cache, abl2);
 			arc_space_return(L2HDR_SIZE, ARC_SPACE_L2HDRS);
 			ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
 		}
@@ -4827,7 +4831,7 @@ top:
 				abl2 = ab->b_l2hdr;
 				ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize);
 				ab->b_l2hdr = NULL;
-				kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
+				kmem_cache_free(l2arc_hdr_cache, abl2);
 				arc_space_return(L2HDR_SIZE, ARC_SPACE_L2HDRS);
 				ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
 			}
@@ -4973,9 +4977,9 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
 			/*
 			 * Create and add a new L2ARC header.
 			 */
-			l2hdr = kmem_zalloc(sizeof (l2arc_buf_hdr_t),
-			    KM_PUSHPAGE);
+			l2hdr = kmem_cache_alloc(l2arc_hdr_cache, KM_PUSHPAGE);
 			l2hdr->b_dev = dev;
 			l2hdr->b_daddr = 0;
 			arc_space_consume(L2HDR_SIZE, ARC_SPACE_L2HDRS);
 
 			ab->b_flags |= ARC_L2_WRITING;

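A note on the arc.c hunks above (commentary, not part of the diff): only the allocator changes; the ARC space accounting stays as it was, with every header allocation still paired with arc_space_consume(L2HDR_SIZE, ARC_SPACE_L2HDRS) and every free with arc_space_return(L2HDR_SIZE, ARC_SPACE_L2HDRS). The hypothetical wrappers below are not in the patch and assume arc.c's l2arc_hdr_cache, L2HDR_SIZE, and ARC_SPACE_L2HDRS declarations; they only make that pairing explicit.

/* Hypothetical wrappers; the patch open-codes these call pairs instead. */
static l2arc_buf_hdr_t *
l2arc_hdr_alloc(l2arc_dev_t *dev)
{
	l2arc_buf_hdr_t *l2hdr;

	l2hdr = kmem_cache_alloc(l2arc_hdr_cache, KM_PUSHPAGE);
	l2hdr->b_dev = dev;
	l2hdr->b_daddr = 0;
	arc_space_consume(L2HDR_SIZE, ARC_SPACE_L2HDRS);
	return (l2hdr);
}

static void
l2arc_hdr_free(l2arc_buf_hdr_t *l2hdr)
{
	kmem_cache_free(l2arc_hdr_cache, l2hdr);
	arc_space_return(L2HDR_SIZE, ARC_SPACE_L2HDRS);
}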
@@ -37,6 +37,9 @@
 #include <sys/zio_compress.h>
 #include <sys/dsl_scan.h>
 
+static kmem_cache_t *ddt_cache;
+static kmem_cache_t *ddt_entry_cache;
+
 /*
  * Enable/disable prefetching of dedup-ed blocks which are going to be freed.
  */
@@ -515,7 +518,6 @@ ddt_get_dedup_stats(spa_t *spa, ddt_stat_t *dds_total)
 {
 	ddt_histogram_t *ddh_total;
 
-	/* XXX: Move to a slab */
 	ddh_total = kmem_zalloc(sizeof (ddt_histogram_t), KM_PUSHPAGE);
 	ddt_get_dedup_histogram(spa, ddh_total);
 	ddt_histogram_stat(dds_total, ddh_total);
@@ -659,13 +661,29 @@ ddt_exit(ddt_t *ddt)
 	mutex_exit(&ddt->ddt_lock);
 }
 
+void
+ddt_init(void)
+{
+	ddt_cache = kmem_cache_create("ddt_cache",
+	    sizeof (ddt_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
+	ddt_entry_cache = kmem_cache_create("ddt_entry_cache",
+	    sizeof (ddt_entry_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
+}
+
+void
+ddt_fini(void)
+{
+	kmem_cache_destroy(ddt_entry_cache);
+	kmem_cache_destroy(ddt_cache);
+}
+
 static ddt_entry_t *
 ddt_alloc(const ddt_key_t *ddk)
 {
 	ddt_entry_t *dde;
 
-	/* XXX: Move to a slab */
-	dde = kmem_zalloc(sizeof (ddt_entry_t), KM_PUSHPAGE);
+	dde = kmem_cache_alloc(ddt_entry_cache, KM_PUSHPAGE);
+	bzero(dde, sizeof (ddt_entry_t));
 	cv_init(&dde->dde_cv, NULL, CV_DEFAULT, NULL);
 
 	dde->dde_key = *ddk;
@@ -688,7 +706,7 @@ ddt_free(ddt_entry_t *dde)
 		    DDK_GET_PSIZE(&dde->dde_key));
 
 	cv_destroy(&dde->dde_cv);
-	kmem_free(dde, sizeof (*dde));
+	kmem_cache_free(ddt_entry_cache, dde);
 }
 
 void
@@ -813,8 +831,8 @@ ddt_table_alloc(spa_t *spa, enum zio_checksum c)
 {
 	ddt_t *ddt;
 
-	/* XXX: Move to a slab */
-	ddt = kmem_zalloc(sizeof (*ddt), KM_PUSHPAGE | KM_NODEBUG);
+	ddt = kmem_cache_alloc(ddt_cache, KM_PUSHPAGE | KM_NODEBUG);
+	bzero(ddt, sizeof (ddt_t));
 
 	mutex_init(&ddt->ddt_lock, NULL, MUTEX_DEFAULT, NULL);
 	avl_create(&ddt->ddt_tree, ddt_entry_compare,
@@ -836,7 +854,7 @@ ddt_table_free(ddt_t *ddt)
 	avl_destroy(&ddt->ddt_tree);
 	avl_destroy(&ddt->ddt_repair_tree);
 	mutex_destroy(&ddt->ddt_lock);
-	kmem_free(ddt, sizeof (*ddt));
+	kmem_cache_free(ddt_cache, ddt);
 }
 
 void
@@ -916,20 +934,20 @@ ddt_class_contains(spa_t *spa, enum ddt_class max_class, const blkptr_t *bp)
 		return (B_TRUE);
 
 	ddt = spa->spa_ddt[BP_GET_CHECKSUM(bp)];
-	dde = kmem_alloc(sizeof (ddt_entry_t), KM_PUSHPAGE);
+	dde = kmem_cache_alloc(ddt_entry_cache, KM_PUSHPAGE);
 
 	ddt_key_fill(&(dde->dde_key), bp);
 
 	for (type = 0; type < DDT_TYPES; type++) {
 		for (class = 0; class <= max_class; class++) {
 			if (ddt_object_lookup(ddt, type, class, dde) == 0) {
-				kmem_free(dde, sizeof (ddt_entry_t));
+				kmem_cache_free(ddt_entry_cache, dde);
 				return (B_TRUE);
 			}
 		}
 	}
 
-	kmem_free(dde, sizeof (ddt_entry_t));
+	kmem_cache_free(ddt_entry_cache, dde);
 	return (B_FALSE);
 }
 

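A note on the ddt.c hunks above (commentary, not part of the diff): ddt_cache and ddt_entry_cache are created without constructors, so the zero-fill guarantee the removed kmem_zalloc() calls provided is reproduced with an explicit bzero() in ddt_alloc() and ddt_table_alloc(); ddt_class_contains() already used the non-zeroing kmem_alloc(), so it needs no bzero(). Below is a hypothetical helper, not in the patch, showing the slab equivalent of the old zeroed allocation; it assumes ddt.c's ddt_entry_cache and the ddt_entry_t type from ddt.h.

static ddt_entry_t *
ddt_entry_alloc_zeroed(void)
{
	ddt_entry_t *dde;

	/* kmem_cache_alloc() may return stale slab memory; zero it by hand. */
	dde = kmem_cache_alloc(ddt_entry_cache, KM_PUSHPAGE);
	bzero(dde, sizeof (ddt_entry_t));
	return (dde);
}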
@@ -1653,6 +1653,7 @@ spa_init(int mode)
 	refcount_init();
 	unique_init();
 	space_map_init();
+	ddt_init();
 	zio_init();
 	dmu_init();
 	zil_init();
@@ -1675,6 +1676,7 @@ spa_fini(void)
 	zil_fini();
 	dmu_fini();
 	zio_fini();
+	ddt_fini();
 	space_map_fini();
 	unique_fini();
 	refcount_fini();
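Finally, on the visibility claim in the commit message (commentary, not part of the diff): once the module is loaded, the new caches should appear as rows named "ddt_cache", "ddt_entry_cache", and "l2arc_buf_hdr_t" in /proc/spl/kmem/slab, matching the names passed to kmem_cache_create() above. A small userland sketch that filters those rows, assuming only that the SPL procfs file exists at the path named in the commit message:

#include <stdio.h>
#include <string.h>

/* Print the /proc/spl/kmem/slab rows for the caches added by this commit. */
int
main(void)
{
	FILE *fp = fopen("/proc/spl/kmem/slab", "r");
	char line[1024];

	if (fp == NULL) {
		perror("/proc/spl/kmem/slab");
		return (1);
	}
	while (fgets(line, sizeof (line), fp) != NULL) {
		if (strstr(line, "ddt_cache") != NULL ||
		    strstr(line, "ddt_entry_cache") != NULL ||
		    strstr(line, "l2arc_buf_hdr_t") != NULL)
			fputs(line, stdout);
	}
	(void) fclose(fp);
	return (0);
}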