Scale the dbuf cache with arc_c
Commit d3c2ae1 introduced a dbuf cache with a default size of the
smaller of 100M or 1/32 of the maximum ARC size. (These figures may be
adjusted using dbuf_cache_max_bytes and dbuf_cache_max_shift.) The dbuf
cache is counted as metadata for the purposes of ARC size calculations.
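
For reference, before this patch the limit was computed once, statically,
during dbuf initialization, roughly as sketched below. This is a paraphrase
with assumed stand-in declarations, not an exact excerpt; the real clamp
appears in the last hunk of the diff further down.

    #include <stdint.h>
    #include <sys/param.h>                  /* MIN() */

    /* Stand-ins for the real ZFS symbols, for illustration only. */
    extern uint64_t arc_max_bytes(void);    /* returns arc_c_max */

    unsigned long dbuf_cache_max_bytes = 100 * 1024 * 1024; /* 100M default */
    int dbuf_cache_max_shift = 5;                            /* 1/32 of the ARC */

    /* Hypothetical helper; in the real code this happens during dbuf_init(). */
    static void
    dbuf_cache_size_init(void)
    {
            /*
             * Clamp once against the maximum ARC size; the result never
             * changes afterwards, even when the ARC target later shrinks.
             */
            dbuf_cache_max_bytes = MIN(dbuf_cache_max_bytes,
                arc_max_bytes() >> dbuf_cache_max_shift);
    }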
On a 1GB box the maximum ARC size (arc_c_max) defaults to 493M, which
gives a default dbuf cache size of 15.4M, while the ARC metadata minimum
defaults to 16M. That is, the dbuf cache alone is a significant
proportion of the minimum metadata size, and with the other overheads
involved the ARC metadata never actually gets down to that minimum.
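
To make the arithmetic concrete, a minimal stand-alone illustration of
the numbers quoted above (the values are the defaults described in this
message, hard-coded for the example):

    #include <stdio.h>

    int
    main(void)
    {
            unsigned long long arc_c_max = 493ULL << 20;    /* ~493M on a 1GB box */
            int dbuf_cache_max_shift = 5;                   /* default: 1/32 */
            unsigned long long arc_meta_min = 16ULL << 20;  /* 16M default */

            /* 493M >> 5 = 493M / 32, i.e. about 15.4M of dbuf cache. */
            printf("dbuf cache %.1fM of %.0fM metadata minimum\n",
                (double)(arc_c_max >> dbuf_cache_max_shift) / (1 << 20),
                (double)arc_meta_min / (1 << 20));
            return (0);
    }

This prints "dbuf cache 15.4M of 16M metadata minimum": the dbuf cache by
itself consumes almost the entire metadata minimum.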
This patch dynamically scales the dbuf cache to the target ARC size
(arc_c) instead of statically scaling it to the maximum ARC size
(arc_c_max). (The scale is still set by dbuf_cache_max_shift and the
maximum size is still capped by dbuf_cache_max_bytes.) The target ARC
size is used rather than the current ARC size so that the dbuf cache
helps the ARC reach its target rather than simply tracking the current
size.
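
Conceptually, the limit is now recomputed on every check rather than
fixed at init time. The helper the diff below adds looks roughly like
this, collected here for readability with assumed stand-in declarations
(dbuf_cache_hiwater_threshold() is a hypothetical name used only for
this sketch):

    #include <stdint.h>
    #include <sys/param.h>                          /* MIN() */

    /* Stand-ins for the real ZFS symbols, for illustration only. */
    extern uint64_t arc_target_bytes(void);         /* returns arc_c after this patch */
    extern unsigned long dbuf_cache_max_bytes;
    extern int dbuf_cache_max_shift;
    extern unsigned int dbuf_cache_hiwater_pct;

    /* Cap the dbuf cache at 1/32nd (default) of the *target* ARC size. */
    static inline unsigned long
    dbuf_cache_target_bytes(void)
    {
            return (MIN(dbuf_cache_max_bytes,
                arc_target_bytes() >> dbuf_cache_max_shift));
    }

    /* The hi/lo water marks become percentages of that moving target. */
    static inline uint64_t
    dbuf_cache_hiwater_threshold(void)
    {
            uint64_t target = dbuf_cache_target_bytes();

            return (target + (target * dbuf_cache_hiwater_pct) / 100);
    }

The patch compares the cache size against thresholds derived this way
(see dbuf_cache_above_hiwater() and dbuf_cache_above_lowater() in the
diff), so the dbuf cache shrinks along with arc_c instead of holding on
to 1/32nd of arc_c_max.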
Reviewed-by: Chunwei Chen <tuxoko@gmail.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: George Melikov <mail@gmelikov.ru>
Signed-off-by: Chris Dunlop <chris@onthe.net.au>
Issue #6506
Closes #6561
commit 25d232f407
parent edd7c24623
@@ -248,7 +248,7 @@ void arc_flush(spa_t *spa, boolean_t retry);
 void arc_tempreserve_clear(uint64_t reserve);
 int arc_tempreserve_space(uint64_t reserve, uint64_t txg);
 
-uint64_t arc_max_bytes(void);
+uint64_t arc_target_bytes(void);
 void arc_init(void);
 void arc_fini(void);
 
@@ -6475,9 +6475,9 @@ arc_state_fini(void)
 }
 
 uint64_t
-arc_max_bytes(void)
+arc_target_bytes(void)
 {
-        return (arc_c_max);
+        return (arc_c);
 }
 
 void
@@ -464,24 +464,35 @@ dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
             multilist_get_num_sublists(ml));
 }
 
+static inline unsigned long
+dbuf_cache_target_bytes(void)
+{
+        return MIN(dbuf_cache_max_bytes,
+            arc_target_bytes() >> dbuf_cache_max_shift);
+}
+
 static inline boolean_t
 dbuf_cache_above_hiwater(void)
 {
+        uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
+
         uint64_t dbuf_cache_hiwater_bytes =
-            (dbuf_cache_max_bytes * dbuf_cache_hiwater_pct) / 100;
+            (dbuf_cache_target * dbuf_cache_hiwater_pct) / 100;
 
         return (refcount_count(&dbuf_cache_size) >
-            dbuf_cache_max_bytes + dbuf_cache_hiwater_bytes);
+            dbuf_cache_target + dbuf_cache_hiwater_bytes);
 }
 
 static inline boolean_t
 dbuf_cache_above_lowater(void)
 {
+        uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
+
         uint64_t dbuf_cache_lowater_bytes =
-            (dbuf_cache_max_bytes * dbuf_cache_lowater_pct) / 100;
+            (dbuf_cache_target * dbuf_cache_lowater_pct) / 100;
 
         return (refcount_count(&dbuf_cache_size) >
-            dbuf_cache_max_bytes - dbuf_cache_lowater_bytes);
+            dbuf_cache_target - dbuf_cache_lowater_bytes);
 }
 
 /*
@@ -601,7 +612,7 @@ dbuf_evict_notify(void)
          * because it's OK to occasionally make the wrong decision here,
          * and grabbing the lock results in massive lock contention.
          */
-        if (refcount_count(&dbuf_cache_size) > dbuf_cache_max_bytes) {
+        if (refcount_count(&dbuf_cache_size) > dbuf_cache_target_bytes()) {
                 if (dbuf_cache_above_hiwater())
                         dbuf_evict_one();
                 cv_signal(&dbuf_evict_cv);
@@ -658,7 +669,7 @@ retry:
          * dbuf cache to 1/32nd (default) of the size of the ARC.
          */
         dbuf_cache_max_bytes = MIN(dbuf_cache_max_bytes,
-            arc_max_bytes() >> dbuf_cache_max_shift);
+            arc_target_bytes() >> dbuf_cache_max_shift);
 
         /*
          * All entries are queued via taskq_dispatch_ent(), so min/maxalloc