make zil max block size tunable
We've observed that on some highly fragmented pools, most metaslab
allocations are small (~2-8KB), but there are some large, 128K
allocations. The large allocations are for ZIL blocks. If there is a
lot of fragmentation, the large allocations can be hard to satisfy.

The most common impact of this is that we need to check (and thus
load) lots of metaslabs from the ZIL allocation code path, causing
sync writes to wait for metaslabs to load, which can take a second or
more. In the worst case, we may not be able to satisfy the
allocation, in which case the ZIL will resort to txg_wait_synced() to
ensure the change is on disk.

To provide a workaround for this, this change adds a tunable that can
reduce the size of ZIL blocks.

External-issue: DLPX-61719
Reviewed-by: George Wilson <george.wilson@delphix.com>
Reviewed-by: Paul Dagnelie <pcd@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #8865
commit b8738257c2 (parent 5a902f5aaa)
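A usage sketch, assuming the standard Linux sysfs layout for module
parameters (this note is not part of the commit message): the diff below
registers the tunable with module_param(zil_maxblocksize, int, 0644), so it
should be writable at runtime, e.g.
echo 36864 > /sys/module/zfs/parameters/zil_maxblocksize
using the 36KB value the man-page change suggests for very fragmented
pools. Each ZIL copies the value into zl_max_block_size when it is
initialized (see zil_alloc() below), so already-open ZILs keep their
current size.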
@@ -1692,7 +1692,7 @@ ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr)
 	if (zil_replaying(zd->zd_zilog, tx))
 		return;
 
-	if (lr->lr_length > ZIL_MAX_LOG_DATA)
+	if (lr->lr_length > zil_max_log_data(zd->zd_zilog))
 		write_state = WR_INDIRECT;
 
 	itx = zil_itx_create(TX_WRITE,
@@ -20,7 +20,7 @@
  */
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
  */
 
 /* Portions Copyright 2010 Robert Milkowski */
@@ -515,6 +515,9 @@ extern void zil_set_sync(zilog_t *zilog, uint64_t syncval);
 
 extern void zil_set_logbias(zilog_t *zilog, uint64_t slogval);
 
+extern uint64_t zil_max_copied_data(zilog_t *zilog);
+extern uint64_t zil_max_log_data(zilog_t *zilog);
+
 extern int zil_replay_disable;
 
 #ifdef __cplusplus
@@ -20,7 +20,7 @@
  */
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
  */
 
 /* Portions Copyright 2010 Robert Milkowski */
@@ -209,6 +209,13 @@ struct zilog {
 	uint_t		zl_prev_rotor;	/* rotor for zl_prev[] */
 	txg_node_t	zl_dirty_link;	/* protected by dp_dirty_zilogs list */
 	uint64_t	zl_dirty_max_txg; /* highest txg used to dirty zilog */
+	/*
+	 * Max block size for this ZIL. Note that this can not be changed
+	 * while the ZIL is in use because consumers (ZPL/zvol) need to take
+	 * this into account when deciding between WR_COPIED and WR_NEED_COPY
+	 * (see zil_max_copied_data()).
+	 */
+	uint64_t	zl_max_block_size;
 };
 
 typedef struct zil_bp_node {
@@ -216,26 +223,6 @@ typedef struct zil_bp_node {
 	avl_node_t	zn_node;
 } zil_bp_node_t;
 
-/*
- * Maximum amount of write data that can be put into single log block.
- */
-#define	ZIL_MAX_LOG_DATA (SPA_OLD_MAXBLOCKSIZE - sizeof (zil_chain_t) - \
-    sizeof (lr_write_t))
-
-/*
- * Maximum amount of log space we agree to waste to reduce number of
- * WR_NEED_COPY chunks to reduce zl_get_data() overhead (~12%).
- */
-#define	ZIL_MAX_WASTE_SPACE (ZIL_MAX_LOG_DATA / 8)
-
-/*
- * Maximum amount of write data for WR_COPIED.  Fall back to WR_NEED_COPY
- * as more space efficient if we can't fit at least two log records into
- * maximum sized log block.
- */
-#define	ZIL_MAX_COPIED_DATA ((SPA_OLD_MAXBLOCKSIZE - \
-    sizeof (zil_chain_t)) / 2 - sizeof (lr_write_t))
-
 #ifdef __cplusplus
 }
 #endif
@@ -2826,6 +2826,18 @@ value of 100% will create a maximum of one thread per cpu.
 Default value: \fB100\fR%.
 .RE
 
+.sp
+.ne 2
+.na
+\fBzil_maxblocksize\fR (int)
+.ad
+.RS 12n
+This sets the maximum block size used by the ZIL. On very fragmented pools,
+lowering this (typically to 36KB) can improve performance.
+.sp
+Default value: \fB131072\fR (128KB).
+.RE
+
 .sp
 .ne 2
 .na
@@ -20,7 +20,7 @@
  */
 /*
  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2015 by Delphix. All rights reserved.
+ * Copyright (c) 2015, 2018 by Delphix. All rights reserved.
  */
 
 
@@ -528,7 +528,14 @@ zfs_log_write(zilog_t *zilog, dmu_tx_t *tx, int txtype,
 		itx_wr_state_t wr_state = write_state;
 		ssize_t len = resid;
 
-		if (wr_state == WR_COPIED && resid > ZIL_MAX_COPIED_DATA)
+		/*
+		 * A WR_COPIED record must fit entirely in one log block.
+		 * Large writes can use WR_NEED_COPY, which the ZIL will
+		 * split into multiple records across several log blocks
+		 * if necessary.
+		 */
+		if (wr_state == WR_COPIED &&
+		    resid > zil_max_copied_data(zilog))
 			wr_state = WR_NEED_COPY;
 		else if (wr_state == WR_INDIRECT)
 			len = MIN(blocksize - P2PHASE(off, blocksize), resid);
@@ -1423,6 +1423,13 @@ uint64_t zil_block_buckets[] = {
 	UINT64_MAX
 };
 
+/*
+ * Maximum block size used by the ZIL. This is picked up when the ZIL is
+ * initialized. Otherwise this should not be used directly; see
+ * zl_max_block_size instead.
+ */
+int zil_maxblocksize = SPA_OLD_MAXBLOCKSIZE;
+
 /*
  * Start a log block write and advance to the next log block.
  * Calls are serialized.
@@ -1499,9 +1506,7 @@ zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
 	zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
 	for (i = 0; zil_blksz > zil_block_buckets[i]; i++)
 		continue;
-	zil_blksz = zil_block_buckets[i];
-	if (zil_blksz == UINT64_MAX)
-		zil_blksz = SPA_OLD_MAXBLOCKSIZE;
+	zil_blksz = MIN(zil_block_buckets[i], zilog->zl_max_block_size);
 	zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
 	for (i = 0; i < ZIL_PREV_BLKS; i++)
 		zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
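For illustration, a minimal standalone C sketch of the block-size selection
logic after this hunk; the bucket values are placeholders, not the actual
zil_block_buckets contents:

#include <stdint.h>
#include <stdio.h>

#define	MIN(a, b)	((a) < (b) ? (a) : (b))

/* Placeholder bucket table; terminated by UINT64_MAX like the real one. */
static const uint64_t buckets[] = {
	4096, 16384, 65536, 131072, UINT64_MAX
};

/*
 * Pick the smallest bucket that covers the estimated need, then clamp to
 * the per-ZIL maximum. Because buckets[] ends in UINT64_MAX, the clamp
 * also handles the "larger than any bucket" case, which is why the old
 * explicit SPA_OLD_MAXBLOCKSIZE fallback could be removed.
 */
static uint64_t
pick_zil_blksz(uint64_t needed, uint64_t zl_max_block_size)
{
	int i;

	for (i = 0; needed > buckets[i]; i++)
		continue;
	return (MIN(buckets[i], zl_max_block_size));
}

int
main(void)
{
	/* A ~100KB estimate is clamped to 36KB when the tunable is lowered. */
	printf("%llu\n", (unsigned long long)pick_zil_blksz(102400, 36864));
	/* With the 128KB default it selects the 128KB bucket. */
	printf("%llu\n", (unsigned long long)pick_zil_blksz(102400, 131072));
	return (0);
}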
@@ -1562,13 +1567,47 @@ zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
 	return (nlwb);
 }
 
+/*
+ * Maximum amount of write data that can be put into single log block.
+ */
+uint64_t
+zil_max_log_data(zilog_t *zilog)
+{
+	return (zilog->zl_max_block_size -
+	    sizeof (zil_chain_t) - sizeof (lr_write_t));
+}
+
+/*
+ * Maximum amount of log space we agree to waste to reduce number of
+ * WR_NEED_COPY chunks to reduce zl_get_data() overhead (~12%).
+ */
+static inline uint64_t
+zil_max_waste_space(zilog_t *zilog)
+{
+	return (zil_max_log_data(zilog) / 8);
+}
+
+/*
+ * Maximum amount of write data for WR_COPIED. For correctness, consumers
+ * must fall back to WR_NEED_COPY if we can't fit the entire record into one
+ * maximum sized log block, because each WR_COPIED record must fit in a
+ * single log block. For space efficiency, we want to fit two records into a
+ * max-sized log block.
+ */
+uint64_t
+zil_max_copied_data(zilog_t *zilog)
+{
+	return ((zilog->zl_max_block_size - sizeof (zil_chain_t)) / 2 -
+	    sizeof (lr_write_t));
+}
+
 static lwb_t *
 zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
 {
 	lr_t *lrcb, *lrc;
 	lr_write_t *lrwb, *lrw;
 	char *lr_buf;
-	uint64_t dlen, dnow, lwb_sp, reclen, txg;
+	uint64_t dlen, dnow, lwb_sp, reclen, txg, max_log_data;
 
 	ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
 	ASSERT3P(lwb, !=, NULL);
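To make the new helpers concrete, here is a self-contained C sketch that
evaluates both limits for the default (128KB) and the suggested 36KB block
size. ZIL_CHAIN_SZ and LR_WRITE_SZ are assumed stand-ins for
sizeof (zil_chain_t) and sizeof (lr_write_t), not the real values:

#include <stdint.h>
#include <stdio.h>

/* Assumed stand-ins; the kernel code uses the real struct sizes. */
#define	ZIL_CHAIN_SZ	40	/* placeholder for sizeof (zil_chain_t) */
#define	LR_WRITE_SZ	192	/* placeholder for sizeof (lr_write_t) */

/* Mirrors zil_max_log_data(): payload capacity of one log block. */
static uint64_t
max_log_data(uint64_t max_block_size)
{
	return (max_block_size - ZIL_CHAIN_SZ - LR_WRITE_SZ);
}

/* Mirrors zil_max_copied_data(): fit two WR_COPIED records per block. */
static uint64_t
max_copied_data(uint64_t max_block_size)
{
	return ((max_block_size - ZIL_CHAIN_SZ) / 2 - LR_WRITE_SZ);
}

int
main(void)
{
	uint64_t sizes[] = { 131072, 36864 };	/* default and suggested 36KB */

	for (int i = 0; i < 2; i++) {
		printf("blocksize %-7llu log_data %-7llu copied_data %llu\n",
		    (unsigned long long)sizes[i],
		    (unsigned long long)max_log_data(sizes[i]),
		    (unsigned long long)max_copied_data(sizes[i]));
	}
	return (0);
}

Lowering the block size shrinks both limits, which is why consumers must
re-check the WR_COPIED threshold against the per-ZIL value rather than a
compile-time constant.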
@@ -1617,15 +1656,27 @@ cont:
 	 * For WR_NEED_COPY optimize layout for minimal number of chunks.
 	 */
 	lwb_sp = lwb->lwb_sz - lwb->lwb_nused;
+	max_log_data = zil_max_log_data(zilog);
 	if (reclen > lwb_sp || (reclen + dlen > lwb_sp &&
-	    lwb_sp < ZIL_MAX_WASTE_SPACE && (dlen % ZIL_MAX_LOG_DATA == 0 ||
-	    lwb_sp < reclen + dlen % ZIL_MAX_LOG_DATA))) {
+	    lwb_sp < zil_max_waste_space(zilog) &&
+	    (dlen % max_log_data == 0 ||
+	    lwb_sp < reclen + dlen % max_log_data))) {
 		lwb = zil_lwb_write_issue(zilog, lwb);
 		if (lwb == NULL)
 			return (NULL);
 		zil_lwb_write_open(zilog, lwb);
 		ASSERT(LWB_EMPTY(lwb));
 		lwb_sp = lwb->lwb_sz - lwb->lwb_nused;
 
+		/*
+		 * There must be enough space in the new, empty log block to
+		 * hold reclen. For WR_COPIED, we need to fit the whole
+		 * record in one block, and reclen is the header size + the
+		 * data size. For WR_NEED_COPY, we can create multiple
+		 * records, splitting the data into multiple blocks, so we
+		 * only need to fit one word of data per block; in this case
+		 * reclen is just the header size (no data).
+		 */
 		ASSERT3U(reclen + MIN(dlen, sizeof (uint64_t)), <=, lwb_sp);
 	}
 
@@ -3124,6 +3175,7 @@ zil_alloc(objset_t *os, zil_header_t *zh_phys)
 	zilog->zl_dirty_max_txg = 0;
 	zilog->zl_last_lwb_opened = NULL;
 	zilog->zl_last_lwb_latency = 0;
+	zilog->zl_max_block_size = zil_maxblocksize;
 
 	mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
 	mutex_init(&zilog->zl_issuer_lock, NULL, MUTEX_DEFAULT, NULL);
@@ -3637,5 +3689,8 @@ MODULE_PARM_DESC(zil_nocacheflush, "Disable ZIL cache flushes");
 
 module_param(zil_slog_bulk, ulong, 0644);
 MODULE_PARM_DESC(zil_slog_bulk, "Limit in bytes slog sync writes per commit");
+
+module_param(zil_maxblocksize, int, 0644);
+MODULE_PARM_DESC(zil_maxblocksize, "Limit in bytes of ZIL log block size");
 /* END CSTYLED */
 #endif
@@ -684,7 +684,7 @@ zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, uint64_t offset,
 		itx_wr_state_t wr_state = write_state;
 		ssize_t len = size;
 
-		if (wr_state == WR_COPIED && size > ZIL_MAX_COPIED_DATA)
+		if (wr_state == WR_COPIED && size > zil_max_copied_data(zilog))
 			wr_state = WR_NEED_COPY;
 		else if (wr_state == WR_INDIRECT)
 			len = MIN(blocksize - P2PHASE(offset, blocksize), size);