/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 * Copyright 2016 Toomas Soome <tsoome@me.com>
 */

#ifndef _ZIO_H
#define	_ZIO_H

#include <sys/zio_priority.h>
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/fs/zfs.h>
#include <sys/zio_impl.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Embedded checksum
 */
#define	ZEC_MAGIC	0x210da7ab10c7a11ULL

typedef struct zio_eck {
	uint64_t	zec_magic;	/* for validation, endianness */
	zio_cksum_t	zec_cksum;	/* 256-bit checksum */
} zio_eck_t;

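/*
 * Illustrative sketch (not part of this header): since zec_magic is written
 * in the writer's native byte order, a reader can detect whether an embedded
 * checksum needs byteswapping by comparing the stored magic against ZEC_MAGIC
 * and its swapped form, along these lines (assuming the usual BSWAP_64 and
 * ASSERT macros are available):
 *
 *	zio_eck_t *eck = ...;
 *	boolean_t byteswap = (eck->zec_magic == BSWAP_64(ZEC_MAGIC));
 *	ASSERT(byteswap || eck->zec_magic == ZEC_MAGIC);
 */
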
/*
 * Gang block headers are self-checksumming and contain an array
 * of block pointers.
 */
#define	SPA_GANGBLOCKSIZE	SPA_MINBLOCKSIZE
#define	SPA_GBH_NBLKPTRS	((SPA_GANGBLOCKSIZE - \
	sizeof (zio_eck_t)) / sizeof (blkptr_t))
#define	SPA_GBH_FILLER		((SPA_GANGBLOCKSIZE - \
	sizeof (zio_eck_t) - \
	(SPA_GBH_NBLKPTRS * sizeof (blkptr_t))) /\
	sizeof (uint64_t))

typedef struct zio_gbh {
	blkptr_t	zg_blkptr[SPA_GBH_NBLKPTRS];
	uint64_t	zg_filler[SPA_GBH_FILLER];
	zio_eck_t	zg_tail;
} zio_gbh_phys_t;

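/*
 * Illustrative sketch: SPA_GBH_NBLKPTRS and SPA_GBH_FILLER are derived so
 * that the block pointer array, the filler words, and the trailing embedded
 * checksum together occupy exactly one SPA_GANGBLOCKSIZE region.  Assuming a
 * CTASSERT-style compile-time assertion macro is available, the invariant
 * could be expressed as:
 *
 *	CTASSERT(sizeof (zio_gbh_phys_t) == SPA_GANGBLOCKSIZE);
 */
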
enum zio_checksum {
	ZIO_CHECKSUM_INHERIT = 0,
	ZIO_CHECKSUM_ON,
	ZIO_CHECKSUM_OFF,
	ZIO_CHECKSUM_LABEL,
	ZIO_CHECKSUM_GANG_HEADER,
	ZIO_CHECKSUM_ZILOG,
	ZIO_CHECKSUM_FLETCHER_2,
	ZIO_CHECKSUM_FLETCHER_4,
	ZIO_CHECKSUM_SHA256,
	ZIO_CHECKSUM_ZILOG2,
	ZIO_CHECKSUM_NOPARITY,
	ZIO_CHECKSUM_SHA512,
	ZIO_CHECKSUM_SKEIN,
	ZIO_CHECKSUM_EDONR,
	ZIO_CHECKSUM_FUNCTIONS
};

/*
 * The number of "legacy" checksum functions which can be set on individual
 * objects.
 */
#define	ZIO_CHECKSUM_LEGACY_FUNCTIONS	ZIO_CHECKSUM_ZILOG2

#define	ZIO_CHECKSUM_ON_VALUE	ZIO_CHECKSUM_FLETCHER_4
#define	ZIO_CHECKSUM_DEFAULT	ZIO_CHECKSUM_ON

#define	ZIO_CHECKSUM_MASK	0xffULL
#define	ZIO_CHECKSUM_VERIFY	(1 << 8)

#define	ZIO_DEDUPCHECKSUM	ZIO_CHECKSUM_SHA256
#define	ZIO_DEDUPDITTO_MIN	100

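/*
 * Illustrative sketch: a stored checksum value packs the algorithm in the
 * low byte and a dedup-verify request in bit 8, so a consumer might unpack
 * it roughly as follows (variable names are hypothetical):
 *
 *	enum zio_checksum func = propval & ZIO_CHECKSUM_MASK;
 *	boolean_t verify = (propval & ZIO_CHECKSUM_VERIFY) != 0;
 */
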
/* supported encryption algorithms */
enum zio_encrypt {
	ZIO_CRYPT_INHERIT = 0,
	ZIO_CRYPT_ON,
	ZIO_CRYPT_OFF,
	ZIO_CRYPT_AES_128_CCM,
	ZIO_CRYPT_AES_192_CCM,
	ZIO_CRYPT_AES_256_CCM,
	ZIO_CRYPT_AES_128_GCM,
	ZIO_CRYPT_AES_192_GCM,
	ZIO_CRYPT_AES_256_GCM,
	ZIO_CRYPT_FUNCTIONS
};

#define	ZIO_CRYPT_ON_VALUE	ZIO_CRYPT_AES_256_CCM
#define	ZIO_CRYPT_DEFAULT	ZIO_CRYPT_OFF

/* macros defining encryption lengths */
#define	ZIO_OBJSET_MAC_LEN	32
#define	ZIO_DATA_IV_LEN		12
#define	ZIO_DATA_SALT_LEN	8
#define	ZIO_DATA_MAC_LEN	16

/*
 * The number of "legacy" compression functions which can be set on individual
 * objects.
 */
#define	ZIO_COMPRESS_LEGACY_FUNCTIONS	ZIO_COMPRESS_LZ4

/*
 * The meaning of "compress = on" selected by the compression features enabled
 * on a given pool.
 */
#define	ZIO_COMPRESS_LEGACY_ON_VALUE	ZIO_COMPRESS_LZJB
#define	ZIO_COMPRESS_LZ4_ON_VALUE	ZIO_COMPRESS_LZ4

#define	ZIO_COMPRESS_DEFAULT		ZIO_COMPRESS_OFF

#define	BOOTFS_COMPRESS_VALID(compress)			\
	((compress) == ZIO_COMPRESS_LZJB ||		\
	(compress) == ZIO_COMPRESS_LZ4 ||		\
	(compress) == ZIO_COMPRESS_GZIP_1 ||		\
	(compress) == ZIO_COMPRESS_GZIP_2 ||		\
	(compress) == ZIO_COMPRESS_GZIP_3 ||		\
	(compress) == ZIO_COMPRESS_GZIP_4 ||		\
	(compress) == ZIO_COMPRESS_GZIP_5 ||		\
	(compress) == ZIO_COMPRESS_GZIP_6 ||		\
	(compress) == ZIO_COMPRESS_GZIP_7 ||		\
	(compress) == ZIO_COMPRESS_GZIP_8 ||		\
	(compress) == ZIO_COMPRESS_GZIP_9 ||		\
	(compress) == ZIO_COMPRESS_ZLE ||		\
	(compress) == ZIO_COMPRESS_ON ||		\
	(compress) == ZIO_COMPRESS_OFF)

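/*
 * Illustrative sketch: code validating a compression setting for a bootable
 * dataset would reject anything outside the list above; the error chosen
 * here is only an example:
 *
 *	if (!BOOTFS_COMPRESS_VALID(compress))
 *		return (SET_ERROR(ENOTSUP));
 */
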
/*
 * Default Linux timeout for an sd (SCSI disk) device.
 */
#define	ZIO_DELAY_MAX			(30 * MILLISEC)

#define	ZIO_FAILURE_MODE_WAIT		0
#define	ZIO_FAILURE_MODE_CONTINUE	1
#define	ZIO_FAILURE_MODE_PANIC		2

enum zio_flag {
	/*
	 * Flags inherited by gang, ddt, and vdev children,
	 * and that must be equal for two zios to aggregate
	 */
	ZIO_FLAG_DONT_AGGREGATE	= 1 << 0,
	ZIO_FLAG_IO_REPAIR	= 1 << 1,
	ZIO_FLAG_SELF_HEAL	= 1 << 2,
	ZIO_FLAG_RESILVER	= 1 << 3,
	ZIO_FLAG_SCRUB		= 1 << 4,
	ZIO_FLAG_SCAN_THREAD	= 1 << 5,
	ZIO_FLAG_PHYSICAL	= 1 << 6,

#define	ZIO_FLAG_AGG_INHERIT	(ZIO_FLAG_CANFAIL - 1)

	/*
	 * Flags inherited by ddt, gang, and vdev children.
	 */
	ZIO_FLAG_CANFAIL	= 1 << 7,	/* must be first for INHERIT */
	ZIO_FLAG_SPECULATIVE	= 1 << 8,
	ZIO_FLAG_CONFIG_WRITER	= 1 << 9,
	ZIO_FLAG_DONT_RETRY	= 1 << 10,
	ZIO_FLAG_DONT_CACHE	= 1 << 11,
	ZIO_FLAG_NODATA		= 1 << 12,
	ZIO_FLAG_INDUCE_DAMAGE	= 1 << 13,
	ZIO_FLAG_IO_ALLOCATING	= 1 << 14,

#define	ZIO_FLAG_DDT_INHERIT	(ZIO_FLAG_IO_RETRY - 1)
#define	ZIO_FLAG_GANG_INHERIT	(ZIO_FLAG_IO_RETRY - 1)

	/*
	 * Flags inherited by vdev children.
	 */
	ZIO_FLAG_IO_RETRY	= 1 << 15,	/* must be first for INHERIT */
	ZIO_FLAG_PROBE		= 1 << 16,
	ZIO_FLAG_TRYHARD	= 1 << 17,
	ZIO_FLAG_OPTIONAL	= 1 << 18,

#define	ZIO_FLAG_VDEV_INHERIT	(ZIO_FLAG_DONT_QUEUE - 1)

	/*
	 * Flags not inherited by any children.
	 */
	ZIO_FLAG_DONT_QUEUE	= 1 << 19,	/* must be first for INHERIT */
	ZIO_FLAG_DONT_PROPAGATE	= 1 << 20,
	ZIO_FLAG_IO_BYPASS	= 1 << 21,
	ZIO_FLAG_IO_REWRITE	= 1 << 22,
	ZIO_FLAG_RAW_COMPRESS	= 1 << 23,
	ZIO_FLAG_RAW_ENCRYPT	= 1 << 24,
	ZIO_FLAG_GANG_CHILD	= 1 << 25,
	ZIO_FLAG_DDT_CHILD	= 1 << 26,
	ZIO_FLAG_GODFATHER	= 1 << 27,
	ZIO_FLAG_NOPWRITE	= 1 << 28,
	ZIO_FLAG_REEXECUTED	= 1 << 29,
	ZIO_FLAG_DELEGATED	= 1 << 30,
	ZIO_FLAG_FASTWRITE	= 1 << 31,
};

#define	ZIO_FLAG_MUSTSUCCEED	0
#define	ZIO_FLAG_RAW	(ZIO_FLAG_RAW_COMPRESS | ZIO_FLAG_RAW_ENCRYPT)

#define	ZIO_DDT_CHILD_FLAGS(zio)				\
	(((zio)->io_flags & ZIO_FLAG_DDT_INHERIT) |		\
	ZIO_FLAG_DDT_CHILD | ZIO_FLAG_CANFAIL)

#define	ZIO_GANG_CHILD_FLAGS(zio)				\
	(((zio)->io_flags & ZIO_FLAG_GANG_INHERIT) |		\
	ZIO_FLAG_GANG_CHILD | ZIO_FLAG_CANFAIL)

#define	ZIO_VDEV_CHILD_FLAGS(zio)				\
	(((zio)->io_flags & ZIO_FLAG_VDEV_INHERIT) |		\
	ZIO_FLAG_CANFAIL)

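/*
 * Illustrative sketch: when a parent zio fans out to a gang child, the
 * child's flags keep only the parent's inheritable bits and then tag the
 * child explicitly, e.g.:
 *
 *	enum zio_flag child_flags = ZIO_GANG_CHILD_FLAGS(pio);
 *
 * which expands to (pio->io_flags & ZIO_FLAG_GANG_INHERIT) |
 * ZIO_FLAG_GANG_CHILD | ZIO_FLAG_CANFAIL.
 */
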
enum zio_child {
	ZIO_CHILD_VDEV = 0,
	ZIO_CHILD_GANG,
	ZIO_CHILD_DDT,
	ZIO_CHILD_LOGICAL,
	ZIO_CHILD_TYPES
};

enum zio_wait_type {
	ZIO_WAIT_READY = 0,
	ZIO_WAIT_DONE,
	ZIO_WAIT_TYPES
};

/*
 * We'll take the unused errnos, 'EBADE' and 'EBADR' (from the Convergent
 * graveyard) to indicate checksum errors and fragmentation.
 */
#define	ECKSUM	EBADE
#define	EFRAGS	EBADR

typedef void zio_done_func_t(zio_t *zio);

extern int zio_dva_throttle_enabled;
extern const char *zio_type_name[ZIO_TYPES];

/*
 * A bookmark is a four-tuple <objset, object, level, blkid> that uniquely
 * identifies any block in the pool.  By convention, the meta-objset (MOS)
 * is objset 0, and the meta-dnode is object 0.  This covers all blocks
 * except root blocks and ZIL blocks, which are defined as follows:
 *
 * Root blocks (objset_phys_t) are object 0, level -1:  <objset, 0, -1, 0>.
 * ZIL blocks are bookmarked <objset, 0, -2, blkid == ZIL sequence number>.
 * dmu_sync()ed ZIL data blocks are bookmarked <objset, object, -2, blkid>.
 * dnode visit bookmarks are <objset, object id of dnode, -3, 0>.
 *
 * Note: this structure is called a bookmark because its original purpose
 * was to remember where to resume a pool-wide traverse.
 *
 * Note: this structure is passed between userland and the kernel, and is
 * stored on disk (by virtue of being incorporated into other on-disk
 * structures, e.g. dsl_scan_phys_t).
 */
struct zbookmark_phys {
	uint64_t	zb_objset;
	uint64_t	zb_object;
	int64_t		zb_level;
	uint64_t	zb_blkid;
};

#define	SET_BOOKMARK(zb, objset, object, level, blkid)	\
{							\
	(zb)->zb_objset = objset;			\
	(zb)->zb_object = object;			\
	(zb)->zb_level = level;				\
	(zb)->zb_blkid = blkid;				\
}

#define	ZB_DESTROYED_OBJSET	(-1ULL)

#define	ZB_ROOT_OBJECT		(0ULL)
#define	ZB_ROOT_LEVEL		(-1LL)
#define	ZB_ROOT_BLKID		(0ULL)

#define	ZB_ZIL_OBJECT		(0ULL)
#define	ZB_ZIL_LEVEL		(-2LL)

#define	ZB_DNODE_LEVEL		(-3LL)
#define	ZB_DNODE_BLKID		(0ULL)

#define	ZB_IS_ZERO(zb)						\
	((zb)->zb_objset == 0 && (zb)->zb_object == 0 &&	\
	(zb)->zb_level == 0 && (zb)->zb_blkid == 0)
#define	ZB_IS_ROOT(zb)						\
	((zb)->zb_object == ZB_ROOT_OBJECT &&			\
	(zb)->zb_level == ZB_ROOT_LEVEL &&			\
	(zb)->zb_blkid == ZB_ROOT_BLKID)

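/*
 * Illustrative sketch: a caller describing a ZIL block with the conventions
 * above might fill in a bookmark like this (objset_id and zil_seq are
 * hypothetical values tracked by the caller):
 *
 *	zbookmark_phys_t zb;
 *	SET_BOOKMARK(&zb, objset_id, ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, zil_seq);
 *	ASSERT(!ZB_IS_ROOT(&zb));
 */
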
typedef struct zio_prop {
	enum zio_checksum	zp_checksum;
	enum zio_compress	zp_compress;
	dmu_object_type_t	zp_type;
	uint8_t			zp_level;
	uint8_t			zp_copies;
	boolean_t		zp_dedup;
	boolean_t		zp_dedup_verify;
	boolean_t		zp_nopwrite;
	boolean_t		zp_encrypt;
	boolean_t		zp_byteorder;
	uint8_t			zp_salt[ZIO_DATA_SALT_LEN];
	uint8_t			zp_iv[ZIO_DATA_IV_LEN];
	uint8_t			zp_mac[ZIO_DATA_MAC_LEN];
} zio_prop_t;

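/*
 * Illustrative sketch: a writer typically fills a zio_prop_t with the
 * effective dataset settings before issuing a write; the values below are
 * only examples:
 *
 *	zio_prop_t zp = { 0 };
 *	zp.zp_checksum = ZIO_CHECKSUM_FLETCHER_4;
 *	zp.zp_compress = ZIO_COMPRESS_LZ4;
 *	zp.zp_type = DMU_OT_PLAIN_FILE_CONTENTS;
 *	zp.zp_level = 0;
 *	zp.zp_copies = 1;
 */
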
typedef struct zio_cksum_report zio_cksum_report_t;

typedef void zio_cksum_finish_f(zio_cksum_report_t *rep,
    const abd_t *good_data);
typedef void zio_cksum_free_f(void *cbdata, size_t size);

struct zio_bad_cksum;	/* defined in zio_checksum.h */
struct dnode_phys;
struct abd;

struct zio_cksum_report {
	struct zio_cksum_report *zcr_next;
	nvlist_t		*zcr_ereport;
	nvlist_t		*zcr_detector;
	void			*zcr_cbdata;
	size_t			zcr_cbinfo;	/* passed to zcr_free() */
	uint64_t		zcr_align;
	uint64_t		zcr_length;
	zio_cksum_finish_f	*zcr_finish;
	zio_cksum_free_f	*zcr_free;

	/* internal use only */
	struct zio_bad_cksum	*zcr_ckinfo;	/* information from failure */
};

typedef void zio_vsd_cksum_report_f(zio_t *zio, zio_cksum_report_t *zcr,
    void *arg);

zio_vsd_cksum_report_f	zio_vsd_default_cksum_report;

typedef struct zio_vsd_ops {
	zio_done_func_t		*vsd_free;
	zio_vsd_cksum_report_f	*vsd_cksum_report;
} zio_vsd_ops_t;

typedef struct zio_gang_node {
	zio_gbh_phys_t		*gn_gbh;
	struct zio_gang_node	*gn_child[SPA_GBH_NBLKPTRS];
} zio_gang_node_t;

typedef zio_t *zio_gang_issue_func_t(zio_t *zio, blkptr_t *bp,
    zio_gang_node_t *gn, struct abd *data, uint64_t offset);

typedef void zio_transform_func_t(zio_t *zio, struct abd *data, uint64_t size);

typedef struct zio_transform {
	struct abd		*zt_orig_abd;
	uint64_t		zt_orig_size;
	uint64_t		zt_bufsize;
	zio_transform_func_t	*zt_transform;
	struct zio_transform	*zt_next;
} zio_transform_t;

typedef int zio_pipe_stage_t(zio_t *zio);

/*
 * The io_reexecute flags are distinct from io_flags because the child must
 * be able to propagate them to the parent.  The normal io_flags are local
 * to the zio, not protected by any lock, and not modifiable by children;
 * the reexecute flags are protected by io_lock, modifiable by children,
 * and always propagated -- even when ZIO_FLAG_DONT_PROPAGATE is set.
 */
#define	ZIO_REEXECUTE_NOW	0x01
#define	ZIO_REEXECUTE_SUSPEND	0x02

typedef struct zio_alloc_list {
	list_t		zal_list;
	uint64_t	zal_size;
} zio_alloc_list_t;

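/*
 * Illustrative sketch: per the comment above, a child that needs the I/O to
 * be retried or the pool suspended sets these bits on its parent under
 * io_lock, roughly:
 *
 *	mutex_enter(&pio->io_lock);
 *	pio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
 *	mutex_exit(&pio->io_lock);
 */
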
typedef struct zio_link {
	zio_t		*zl_parent;
	zio_t		*zl_child;
	list_node_t	zl_parent_node;
	list_node_t	zl_child_node;
} zio_link_t;

struct zio {
	/* Core information about this I/O */
	zbookmark_phys_t	io_bookmark;
	zio_prop_t	io_prop;
	zio_type_t	io_type;
	enum zio_child	io_child_type;
	int		io_cmd;
	zio_priority_t	io_priority;
	uint8_t		io_reexecute;
	uint8_t		io_state[ZIO_WAIT_TYPES];
	uint64_t	io_txg;
	spa_t		*io_spa;
	blkptr_t	*io_bp;
	blkptr_t	*io_bp_override;
	blkptr_t	io_bp_copy;
	list_t		io_parent_list;
	list_t		io_child_list;
	zio_t		*io_logical;
	zio_transform_t *io_transform_stack;

	/* Callback info */
	zio_done_func_t	*io_ready;
	zio_done_func_t	*io_children_ready;
	zio_done_func_t	*io_physdone;
	zio_done_func_t	*io_done;
	void		*io_private;
	int64_t		io_prev_space_delta;	/* DMU private */
	blkptr_t	io_bp_orig;
	/* io_lsize != io_orig_size iff this is a raw write */
	uint64_t	io_lsize;

	/* Data represented by this I/O */
	struct abd	*io_abd;
	struct abd	*io_orig_abd;
	uint64_t	io_size;
	uint64_t	io_orig_size;

	/* Stuff for the vdev stack */
	vdev_t		*io_vd;
	void		*io_vsd;
	const zio_vsd_ops_t *io_vsd_ops;

	uint64_t	io_offset;
	hrtime_t	io_timestamp;	/* submitted at */
	hrtime_t	io_queued_timestamp;
	hrtime_t	io_target_timestamp;
	hrtime_t	io_delta;	/* vdev queue service delta */
	hrtime_t	io_delay;	/* Device access time (disk or file). */
Illumos #4045 write throttle & i/o scheduler performance work
4045 zfs write throttle & i/o scheduler performance work
1. The ZFS i/o scheduler (vdev_queue.c) now divides i/os into 5 classes: sync
read, sync write, async read, async write, and scrub/resilver. The scheduler
issues a number of concurrent i/os from each class to the device. Once a class
has been selected, an i/o is selected from this class using either an elevator
algorithem (async, scrub classes) or FIFO (sync classes). The number of
concurrent async write i/os is tuned dynamically based on i/o load, to achieve
good sync i/o latency when there is not a high load of writes, and good write
throughput when there is. See the block comment in vdev_queue.c (reproduced
below) for more details.
2. The write throttle (dsl_pool_tempreserve_space() and
txg_constrain_throughput()) is rewritten to produce much more consistent delays
when under constant load. The new write throttle is based on the amount of
dirty data, rather than guesses about future performance of the system. When
there is a lot of dirty data, each transaction (e.g. write() syscall) will be
delayed by the same small amount. This eliminates the "brick wall of wait"
that the old write throttle could hit, causing all transactions to wait several
seconds until the next txg opens. One of the keys to the new write throttle is
decrementing the amount of dirty data as i/o completes, rather than at the end
of spa_sync(). Note that the write throttle is only applied once the i/o
scheduler is issuing the maximum number of outstanding async writes. See the
block comments in dsl_pool.c and above dmu_tx_delay() (reproduced below) for
more details.
This diff has several other effects, including:
* the commonly-tuned global variable zfs_vdev_max_pending has been removed;
use per-class zfs_vdev_*_max_active values or zfs_vdev_max_active instead.
* the size of each txg (meaning the amount of dirty data written, and thus the
time it takes to write out) is now controlled differently. There is no longer
an explicit time goal; the primary determinant is amount of dirty data.
Systems that are under light or medium load will now often see that a txg is
always syncing, but the impact to performance (e.g. read latency) is minimal.
Tune zfs_dirty_data_max and zfs_dirty_data_sync to control this.
* zio_taskq_batch_pct = 75 -- Only use 75% of all CPUs for compression,
checksum, etc. This improves latency by not allowing these CPU-intensive tasks
to consume all CPU (on machines with at least 4 CPU's; the percentage is
rounded up).
--matt
APPENDIX: problems with the current i/o scheduler
The current ZFS i/o scheduler (vdev_queue.c) is deadline based. The problem
with this is that if there are always i/os pending, then certain classes of
i/os can see very long delays.
For example, if there are always synchronous reads outstanding, then no async
writes will be serviced until they become "past due". One symptom of this
situation is that each pass of the txg sync takes at least several seconds
(typically 3 seconds).
If many i/os become "past due" (their deadline is in the past), then we must
service all of these overdue i/os before any new i/os. This happens when we
enqueue a batch of async writes for the txg sync, with deadlines 2.5 seconds in
the future. If we can't complete all the i/os in 2.5 seconds (e.g. because
there were always reads pending), then these i/os will become past due. Now we
must service all the "async" writes (which could be hundreds of megabytes)
before we service any reads, introducing considerable latency to synchronous
i/os (reads or ZIL writes).
Notes on porting to ZFS on Linux:
- zio_t gained new members io_physdone and io_phys_children. Because
object caches in the Linux port call the constructor only once at
allocation time, objects may contain residual data when retrieved
from the cache. Therefore zio_create() was updated to zero out the two
new fields.
- vdev_mirror_pending() relied on the depth of the per-vdev pending queue
(vq->vq_pending_tree) to select the least-busy leaf vdev to read from.
This tree has been replaced by vq->vq_active_tree which is now used
for the same purpose.
- vdev_queue_init() used the value of zfs_vdev_max_pending to determine
the number of vdev I/O buffers to pre-allocate. That global no longer
exists, so we instead use the sum of the *_max_active values for each of
the five I/O classes described above.
- The Illumos implementation of dmu_tx_delay() delays a transaction by
sleeping on a condition variable embedded in the thread
(curthread->t_delay_cv). We do not have an equivalent CV to use in
Linux, so this change replaced the delay logic with a wrapper called
zfs_sleep_until(). This wrapper could be adopted upstream and in other
downstream ports to abstract away operating system-specific delay logic
(a minimal sketch appears after these notes).
- These tunables are added as module parameters, and descriptions added
to the zfs-module-parameters.5 man page.
spa_asize_inflation
zfs_deadman_synctime_ms
zfs_vdev_max_active
zfs_vdev_async_write_active_min_dirty_percent
zfs_vdev_async_write_active_max_dirty_percent
zfs_vdev_async_read_max_active
zfs_vdev_async_read_min_active
zfs_vdev_async_write_max_active
zfs_vdev_async_write_min_active
zfs_vdev_scrub_max_active
zfs_vdev_scrub_min_active
zfs_vdev_sync_read_max_active
zfs_vdev_sync_read_min_active
zfs_vdev_sync_write_max_active
zfs_vdev_sync_write_min_active
zfs_dirty_data_max_percent
zfs_delay_min_dirty_percent
zfs_dirty_data_max_max_percent
zfs_dirty_data_max
zfs_dirty_data_max_max
zfs_dirty_data_sync
zfs_delay_scale
The latter four have type unsigned long, whereas they are uint64_t in
Illumos. This accommodates Linux's module_param() supported types, but
means they may overflow on 32-bit architectures.
The values zfs_dirty_data_max and zfs_dirty_data_max_max are the most
likely to overflow on 32-bit systems, since they express physical RAM
sizes in bytes. In fact, Illumos initializes zfs_dirty_data_max_max to
2^32 which does overflow. To resolve that, this port instead initializes
it in arc_init() to 25% of physical RAM, and adds the tunable
zfs_dirty_data_max_max_percent to override that percentage. While this
solution doesn't completely avoid the overflow issue, it should be a
reasonable default for most systems, and the minority of affected
systems can work around the issue by overriding the defaults.
- Fixed reversed logic in comment above zfs_delay_scale declaration.
- Clarified comments in vdev_queue.c regarding when per-queue minimums take
effect.
- Replaced dmu_tx_write_limit in the dmu_tx kstat file
with dmu_tx_dirty_delay and dmu_tx_dirty_over_max. The first counts
how many times a transaction has been delayed because the pool dirty
data has exceeded zfs_delay_min_dirty_percent. The latter counts how
many times the pool dirty data has exceeded zfs_dirty_data_max (which
we expect to never happen).
- The original patch would have regressed the bug fixed in
zfsonlinux/zfs@c418410, which prevented users from setting the
zfs_vdev_aggregation_limit tuning larger than SPA_MAXBLOCKSIZE.
A similar fix is added to vdev_queue_aggregate().
- In vdev_queue_io_to_issue(), dynamically allocate 'zio_t search' on the
heap instead of the stack. In Linux we can't afford such large
structures on the stack.
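As a minimal sketch of the zfs_sleep_until() wrapper idea mentioned in the
dmu_tx_delay() note above: this assumes only gethrtime() and the kernel's delay()
primitive are available, and the helper name example_sleep_until is hypothetical;
the in-tree wrapper is implemented differently.

/*
 * Hedged sketch, not the shipped implementation: sleeps in whole ticks
 * until the absolute wakeup time has passed, hiding the OS-specific
 * delay mechanism behind a single call.
 */
static void
example_sleep_until(hrtime_t wakeup)
{
	while (gethrtime() < wakeup)
		delay(1);	/* yield the CPU for roughly one tick */
}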
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Adam Leventhal <ahl@delphix.com>
Reviewed by: Christopher Siden <christopher.siden@delphix.com>
Reviewed by: Ned Bass <bass6@llnl.gov>
Reviewed by: Brendan Gregg <brendan.gregg@joyent.com>
Approved by: Robert Mustacchi <rm@joyent.com>
References:
http://www.illumos.org/issues/4045
illumos/illumos-gate@69962b5647e4a8b9b14998733b765925381b727e
Ported-by: Ned Bass <bass6@llnl.gov>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #1913
2013-08-29 03:01:20 +00:00
|
|
|
avl_node_t io_queue_node;
|
2015-04-11 18:51:06 +00:00
|
|
|
avl_node_t io_offset_node;
|
2016-10-14 00:59:18 +00:00
|
|
|
avl_node_t io_alloc_node;
|
2017-01-12 19:52:56 +00:00
|
|
|
zio_alloc_list_t io_alloc_list;
|
2008-11-20 20:01:55 +00:00
|
|
|
|
|
|
|
/* Internal pipeline state */
|
2010-05-28 20:45:14 +00:00
|
|
|
enum zio_flag io_flags;
|
|
|
|
enum zio_stage io_stage;
|
|
|
|
enum zio_stage io_pipeline;
|
|
|
|
enum zio_flag io_orig_flags;
|
|
|
|
enum zio_stage io_orig_stage;
|
|
|
|
enum zio_stage io_orig_pipeline;
|
2016-10-14 00:59:18 +00:00
|
|
|
enum zio_stage io_pipeline_trace;
|
2008-12-03 20:09:06 +00:00
|
|
|
int io_error;
|
|
|
|
int io_child_error[ZIO_CHILD_TYPES];
|
|
|
|
uint64_t io_children[ZIO_CHILD_TYPES][ZIO_WAIT_TYPES];
|
2010-05-28 20:45:14 +00:00
|
|
|
uint64_t io_child_count;
|
2013-08-29 03:01:20 +00:00
|
|
|
uint64_t io_phys_children;
|
2010-05-28 20:45:14 +00:00
|
|
|
uint64_t io_parent_count;
|
2008-12-03 20:09:06 +00:00
|
|
|
uint64_t *io_stall;
|
2009-07-02 22:44:48 +00:00
|
|
|
zio_t *io_gang_leader;
|
2008-12-03 20:09:06 +00:00
|
|
|
zio_gang_node_t *io_gang_tree;
|
|
|
|
void *io_executor;
|
2008-11-20 20:01:55 +00:00
|
|
|
void *io_waiter;
|
|
|
|
kmutex_t io_lock;
|
|
|
|
kcondvar_t io_cv;
|
|
|
|
|
|
|
|
/* FMA state */
|
2010-05-28 20:45:14 +00:00
|
|
|
zio_cksum_report_t *io_cksum_report;
|
2008-11-20 20:01:55 +00:00
|
|
|
uint64_t io_ena;
|
2011-11-08 00:26:52 +00:00
|
|
|
|
|
|
|
/* Taskq dispatching state */
|
|
|
|
taskq_ent_t io_tqent;
|
2008-11-20 20:01:55 +00:00
|
|
|
};
|
|
|
|
|
2017-03-21 01:36:00 +00:00
|
|
|
extern int zio_bookmark_compare(const void *, const void *);
|
2016-10-14 00:59:18 +00:00
|
|
|
|
2009-02-18 20:51:31 +00:00
|
|
|
extern zio_t *zio_null(zio_t *pio, spa_t *spa, vdev_t *vd,
|
2010-05-28 20:45:14 +00:00
|
|
|
zio_done_func_t *done, void *private, enum zio_flag flags);
|
2008-11-20 20:01:55 +00:00
|
|
|
|
|
|
|
extern zio_t *zio_root(spa_t *spa,
|
2010-05-28 20:45:14 +00:00
|
|
|
zio_done_func_t *done, void *private, enum zio_flag flags);
|
2008-11-20 20:01:55 +00:00
|
|
|
|
2016-07-22 15:52:49 +00:00
|
|
|
extern zio_t *zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
|
|
|
|
struct abd *data, uint64_t lsize, zio_done_func_t *done, void *private,
|
2014-06-25 18:37:59 +00:00
|
|
|
zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb);
|
2008-11-20 20:01:55 +00:00
|
|
|
|
2008-12-03 20:09:06 +00:00
|
|
|
extern zio_t *zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
|
2016-07-22 15:52:49 +00:00
|
|
|
struct abd *data, uint64_t size, uint64_t psize, const zio_prop_t *zp,
|
2016-05-15 15:02:28 +00:00
|
|
|
zio_done_func_t *ready, zio_done_func_t *children_ready,
|
|
|
|
zio_done_func_t *physdone, zio_done_func_t *done,
|
|
|
|
void *private, zio_priority_t priority, enum zio_flag flags,
|
|
|
|
const zbookmark_phys_t *zb);
|
2008-11-20 20:01:55 +00:00
|
|
|
|
2008-12-03 20:09:06 +00:00
|
|
|
extern zio_t *zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
|
2016-07-22 15:52:49 +00:00
|
|
|
struct abd *data, uint64_t size, zio_done_func_t *done, void *private,
|
2014-06-25 18:37:59 +00:00
|
|
|
zio_priority_t priority, enum zio_flag flags, zbookmark_phys_t *zb);
|
2008-12-03 20:09:06 +00:00
|
|
|
|
2013-05-10 19:47:54 +00:00
|
|
|
extern void zio_write_override(zio_t *zio, blkptr_t *bp, int copies,
|
|
|
|
boolean_t nopwrite);
|
2008-11-20 20:01:55 +00:00
|
|
|
|
2010-05-28 20:45:14 +00:00
|
|
|
extern void zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp);
|
2008-11-20 20:01:55 +00:00
|
|
|
|
2010-05-28 20:45:14 +00:00
|
|
|
extern zio_t *zio_claim(zio_t *pio, spa_t *spa, uint64_t txg,
|
|
|
|
const blkptr_t *bp,
|
|
|
|
zio_done_func_t *done, void *private, enum zio_flag flags);
|
2008-11-20 20:01:55 +00:00
|
|
|
|
|
|
|
extern zio_t *zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
|
2013-08-29 03:01:20 +00:00
|
|
|
zio_done_func_t *done, void *private, enum zio_flag flags);
|
2008-11-20 20:01:55 +00:00
|
|
|
|
|
|
|
extern zio_t *zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset,
|
2016-07-22 15:52:49 +00:00
|
|
|
uint64_t size, struct abd *data, int checksum,
|
2013-08-29 03:01:20 +00:00
|
|
|
zio_done_func_t *done, void *private, zio_priority_t priority,
|
|
|
|
enum zio_flag flags, boolean_t labels);
|
2008-11-20 20:01:55 +00:00
|
|
|
|
|
|
|
extern zio_t *zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset,
|
2016-07-22 15:52:49 +00:00
|
|
|
uint64_t size, struct abd *data, int checksum,
|
2013-08-29 03:01:20 +00:00
|
|
|
zio_done_func_t *done, void *private, zio_priority_t priority,
|
|
|
|
enum zio_flag flags, boolean_t labels);
|
2008-11-20 20:01:55 +00:00
|
|
|
|
2010-05-28 20:45:14 +00:00
|
|
|
extern zio_t *zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg,
|
|
|
|
const blkptr_t *bp, enum zio_flag flags);
|
|
|
|
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decrypt, and authenticate protected datasets.
Each object set maintains a Merkle tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 17:36:48 +00:00
|
|
|
extern int zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg,
|
|
|
|
blkptr_t *new_bp, uint64_t size, boolean_t *slog);
|
2010-05-28 20:45:14 +00:00
|
|
|
extern void zio_free_zil(spa_t *spa, uint64_t txg, blkptr_t *bp);
|
2008-11-20 20:01:55 +00:00
|
|
|
extern void zio_flush(zio_t *zio, vdev_t *vd);
|
2010-05-28 20:45:14 +00:00
|
|
|
extern void zio_shrink(zio_t *zio, uint64_t size);
|
2008-11-20 20:01:55 +00:00
|
|
|
|
|
|
|
extern int zio_wait(zio_t *zio);
|
|
|
|
extern void zio_nowait(zio_t *zio);
|
|
|
|
extern void zio_execute(zio_t *zio);
|
|
|
|
extern void zio_interrupt(zio_t *zio);
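As a hedged usage sketch of these interfaces (a hypothetical helper, not part of this
header; example_read_block is made up): a root zio collects children issued with
zio_nowait(), and zio_wait() on the root blocks until every child has completed.

static int
example_read_block(spa_t *spa, const blkptr_t *bp, struct abd *abd,
    uint64_t lsize, const zbookmark_phys_t *zb)
{
	zio_t *pio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

	/* issue the child read asynchronously under the root zio */
	zio_nowait(zio_read(pio, spa, bp, abd, lsize, NULL, NULL,
	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, zb));

	/* wait for the child to finish; returns the worst child error */
	return (zio_wait(pio));
}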
|
2016-05-23 17:41:29 +00:00
|
|
|
extern void zio_delay_init(zio_t *zio);
|
|
|
|
extern void zio_delay_interrupt(zio_t *zio);
|
2017-12-18 22:06:07 +00:00
|
|
|
extern void zio_deadman(zio_t *zio, char *tag);
|
2008-11-20 20:01:55 +00:00
|
|
|
|
2016-10-14 00:59:18 +00:00
|
|
|
extern zio_t *zio_walk_parents(zio_t *cio, zio_link_t **);
|
|
|
|
extern zio_t *zio_walk_children(zio_t *pio, zio_link_t **);
|
2009-02-18 20:51:31 +00:00
|
|
|
extern zio_t *zio_unique_parent(zio_t *cio);
|
|
|
|
extern void zio_add_child(zio_t *pio, zio_t *cio);
|
|
|
|
|
2008-11-20 20:01:55 +00:00
|
|
|
extern void *zio_buf_alloc(size_t size);
|
|
|
|
extern void zio_buf_free(void *buf, size_t size);
|
|
|
|
extern void *zio_data_buf_alloc(size_t size);
|
|
|
|
extern void zio_data_buf_free(void *buf, size_t size);
|
|
|
|
|
2016-07-22 15:52:49 +00:00
|
|
|
extern void zio_push_transform(zio_t *zio, struct abd *abd, uint64_t size,
|
2016-06-02 04:04:53 +00:00
|
|
|
uint64_t bufsize, zio_transform_func_t *transform);
|
|
|
|
extern void zio_pop_transforms(zio_t *zio);
|
|
|
|
|
2008-11-20 20:01:55 +00:00
|
|
|
extern void zio_resubmit_stage_async(void *);
|
|
|
|
|
|
|
|
extern zio_t *zio_vdev_child_io(zio_t *zio, blkptr_t *bp, vdev_t *vd,
|
2016-07-22 15:52:49 +00:00
|
|
|
uint64_t offset, struct abd *data, uint64_t size, int type,
|
2013-08-29 03:01:20 +00:00
|
|
|
zio_priority_t priority, enum zio_flag flags,
|
|
|
|
zio_done_func_t *done, void *private);
|
2008-11-20 20:01:55 +00:00
|
|
|
|
2008-12-03 20:09:06 +00:00
|
|
|
extern zio_t *zio_vdev_delegated_io(vdev_t *vd, uint64_t offset,
|
2016-07-22 15:52:49 +00:00
|
|
|
struct abd *data, uint64_t size, int type, zio_priority_t priority,
|
2010-05-28 20:45:14 +00:00
|
|
|
enum zio_flag flags, zio_done_func_t *done, void *private);
|
2008-12-03 20:09:06 +00:00
|
|
|
|
2008-11-20 20:01:55 +00:00
|
|
|
extern void zio_vdev_io_bypass(zio_t *zio);
|
|
|
|
extern void zio_vdev_io_reissue(zio_t *zio);
|
|
|
|
extern void zio_vdev_io_redone(zio_t *zio);
|
|
|
|
|
2017-12-21 17:13:06 +00:00
|
|
|
extern void zio_change_priority(zio_t *pio, zio_priority_t priority);
|
|
|
|
|
2008-11-20 20:01:55 +00:00
|
|
|
extern void zio_checksum_verified(zio_t *zio);
|
2008-12-03 20:09:06 +00:00
|
|
|
extern int zio_worst_error(int e1, int e2);
|
2008-11-20 20:01:55 +00:00
|
|
|
|
2010-05-28 20:45:14 +00:00
|
|
|
extern enum zio_checksum zio_checksum_select(enum zio_checksum child,
|
|
|
|
enum zio_checksum parent);
|
|
|
|
extern enum zio_checksum zio_checksum_dedup_select(spa_t *spa,
|
|
|
|
enum zio_checksum child, enum zio_checksum parent);
|
2015-07-06 01:55:32 +00:00
|
|
|
extern enum zio_compress zio_compress_select(spa_t *spa,
|
|
|
|
enum zio_compress child, enum zio_compress parent);
|
2008-11-20 20:01:55 +00:00
|
|
|
|
2008-12-03 20:09:06 +00:00
|
|
|
extern void zio_suspend(spa_t *spa, zio_t *zio);
|
2009-07-02 22:44:48 +00:00
|
|
|
extern int zio_resume(spa_t *spa);
|
2008-12-03 20:09:06 +00:00
|
|
|
extern void zio_resume_wait(spa_t *spa);
|
2008-11-20 20:01:55 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Initial setup and teardown.
|
|
|
|
*/
|
|
|
|
extern void zio_init(void);
|
|
|
|
extern void zio_fini(void);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Fault injection
|
|
|
|
*/
|
|
|
|
struct zinject_record;
|
|
|
|
extern uint32_t zio_injection_enabled;
|
|
|
|
extern int zio_inject_fault(char *name, int flags, int *id,
|
|
|
|
struct zinject_record *record);
|
|
|
|
extern int zio_inject_list_next(int *id, char *name, size_t buflen,
|
|
|
|
struct zinject_record *record);
|
|
|
|
extern int zio_clear_fault(int id);
|
2010-05-28 20:45:14 +00:00
|
|
|
extern void zio_handle_panic_injection(spa_t *spa, char *tag, uint64_t type);
|
2008-11-20 20:01:55 +00:00
|
|
|
extern int zio_handle_fault_injection(zio_t *zio, int error);
|
2009-07-02 22:44:48 +00:00
|
|
|
extern int zio_handle_device_injection(vdev_t *vd, zio_t *zio, int error);
|
2017-08-14 22:17:15 +00:00
|
|
|
extern int zio_handle_device_injections(vdev_t *vd, zio_t *zio, int err1,
|
|
|
|
int err2);
|
2008-12-03 20:09:06 +00:00
|
|
|
extern int zio_handle_label_injection(zio_t *zio, int error);
|
2010-05-28 20:45:14 +00:00
|
|
|
extern void zio_handle_ignored_writes(zio_t *zio);
|
2016-05-23 17:41:29 +00:00
|
|
|
extern hrtime_t zio_handle_io_delay(zio_t *zio);
|
2010-05-28 20:45:14 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Checksum ereport functions
|
|
|
|
*/
|
2017-08-14 17:36:48 +00:00
|
|
|
extern void zfs_ereport_start_checksum(spa_t *spa, vdev_t *vd,
|
|
|
|
zbookmark_phys_t *zb, struct zio *zio, uint64_t offset, uint64_t length,
|
|
|
|
void *arg, struct zio_bad_cksum *info);
|
2010-05-28 20:45:14 +00:00
|
|
|
extern void zfs_ereport_finish_checksum(zio_cksum_report_t *report,
|
2017-01-05 19:10:07 +00:00
|
|
|
const abd_t *good_data, const abd_t *bad_data, boolean_t drop_if_identical);
|
2010-05-28 20:45:14 +00:00
|
|
|
|
|
|
|
extern void zfs_ereport_free_checksum(zio_cksum_report_t *report);
|
|
|
|
|
|
|
|
/* If we have the good data in hand, this function can be used */
|
|
|
|
extern void zfs_ereport_post_checksum(spa_t *spa, vdev_t *vd,
|
2017-08-14 17:36:48 +00:00
|
|
|
zbookmark_phys_t *zb, struct zio *zio, uint64_t offset, uint64_t length,
|
2017-01-05 19:10:07 +00:00
|
|
|
const abd_t *good_data, const abd_t *bad_data, struct zio_bad_cksum *info);
|
2010-05-28 20:45:14 +00:00
|
|
|
|
|
|
|
/* Called from spa_sync(), but primarily an injection handler */
|
|
|
|
extern void spa_handle_ignored_writes(spa_t *spa);
|
2008-11-20 20:01:55 +00:00
|
|
|
|
2014-06-25 18:37:59 +00:00
|
|
|
/* zbookmark_phys functions */
|
2015-12-22 01:31:57 +00:00
|
|
|
boolean_t zbookmark_subtree_completed(const struct dnode_phys *dnp,
|
|
|
|
const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block);
|
|
|
|
int zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2,
|
|
|
|
uint8_t ibs2, const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2);
|
2012-12-13 23:24:15 +00:00
|
|
|
|
2008-11-20 20:01:55 +00:00
|
|
|
#ifdef __cplusplus
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#endif /* _ZIO_H */
|