2012-08-29 19:23:12 +00:00
|
|
|
/*
|
|
|
|
* CDDL HEADER START
|
|
|
|
*
|
|
|
|
* The contents of this file are subject to the terms of the
|
|
|
|
* Common Development and Distribution License (the "License").
|
|
|
|
* You may not use this file except in compliance with the License.
|
|
|
|
*
|
|
|
|
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
|
|
|
|
* or http://www.opensolaris.org/os/licensing.
|
|
|
|
* See the License for the specific language governing permissions
|
|
|
|
* and limitations under the License.
|
|
|
|
*
|
|
|
|
* When distributing Covered Code, include this CDDL HEADER in each
|
|
|
|
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
|
|
|
|
* If applicable, add the following below this CDDL HEADER, with the
|
|
|
|
* fields enclosed by brackets "[]" replaced with your own identifying
|
|
|
|
* information: Portions Copyright [yyyy] [name of copyright owner]
|
|
|
|
*
|
|
|
|
* CDDL HEADER END
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
|
|
|
|
* Use is subject to license terms.
|
|
|
|
*
|
|
|
|
* Portions Copyright 2012 Martin Matuska <martin@matuska.org>
|
|
|
|
*/
|
|
|
|
|
2014-11-03 19:44:19 +00:00
|
|
|
/*
|
2016-07-11 17:45:52 +00:00
|
|
|
* Copyright (c) 2013, 2015 by Delphix. All rights reserved.
|
2014-11-03 19:44:19 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include <ctype.h>
|
2012-08-29 19:23:12 +00:00
|
|
|
#include <libnvpair.h>
|
|
|
|
#include <stdio.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
#include <strings.h>
|
|
|
|
#include <unistd.h>
|
2015-07-06 03:20:31 +00:00
|
|
|
#include <stddef.h>
|
2012-08-29 19:23:12 +00:00
|
|
|
|
|
|
|
#include <sys/dmu.h>
|
|
|
|
#include <sys/zfs_ioctl.h>
|
2016-07-11 17:45:52 +00:00
|
|
|
#include <sys/zio.h>
|
2012-08-29 19:23:12 +00:00
|
|
|
#include <zfs_fletcher.h>
|
Add `zstream redup` command to convert deduplicated send streams
Deduplicated send and receive is deprecated. To ease migration to the
new dedup-send-less world, the commit adds a `zstream redup` utility to
convert deduplicated send streams to normal streams, so that they can
continue to be received indefinitely.
The new `zstream` command also replaces the functionality of
`zstreamdump`, by way of the `zstream dump` subcommand. The
`zstreamdump` command is replaced by a shell script which invokes
`zstream dump`.
The way that `zstream redup` works under the hood is that as we read the
send stream, we build up a hash table which maps from `<GUID, object,
offset> -> <file_offset>`.
Whenever we see a WRITE record, we add a new entry to the hash table,
which indicates where in the stream file to find the WRITE record for
this block. (The key is `drr_toguid, drr_object, drr_offset`.)
For entries other than WRITE_BYREF, we pass them through unchanged
(except for the running checksum, which is recalculated).
For WRITE_BYREF records, we change them to WRITE records. We find the
referenced WRITE record by looking in the hash table (for the record
with key `drr_refguid, drr_refobject, drr_refoffset`), and then reading
the record header and payload from the specified offset in the stream
file. This is why the stream can not be a pipe. The found WRITE record
replaces the WRITE_BYREF record, with its `drr_toguid`, `drr_object`,
and `drr_offset` fields changed to be the same as the WRITE_BYREF's
(i.e. we are writing the same logical block, but with the data supplied
by the previous WRITE record).
This algorithm requires memory proportional to the number of WRITE
records (same as `zfs send -D`), but the size per WRITE record is
relatively low (40 bytes, vs. 72 for `zfs send -D`). A 1TB send stream
with 8KB blocks (`recordsize=8k`) would use around 5GB of RAM to
"redup".
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Reviewed-by: Paul Dagnelie <pcd@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10124
Closes #10156
2020-04-10 17:39:55 +00:00
|
|
|
#include "zstream.h"
|
2012-08-29 19:23:12 +00:00
|
|
|
|
2014-11-03 19:44:19 +00:00
|
|
|
/*
|
|
|
|
* If dump mode is enabled, the number of bytes to print per line
|
|
|
|
*/
|
|
|
|
#define BYTES_PER_LINE 16
|
|
|
|
/*
|
|
|
|
* If dump mode is enabled, the number of bytes to group together, separated
|
|
|
|
* by newlines or spaces
|
|
|
|
*/
|
|
|
|
#define DUMP_GROUPING 4
|
|
|
|
|
2012-08-29 19:23:12 +00:00
|
|
|
uint64_t total_stream_len = 0;
|
|
|
|
FILE *send_stream = 0;
|
|
|
|
boolean_t do_byteswap = B_FALSE;
|
|
|
|
boolean_t do_cksum = B_TRUE;
|
|
|
|
|
2014-11-03 20:15:08 +00:00
|
|
|
static void *
|
|
|
|
safe_malloc(size_t size)
|
|
|
|
{
|
|
|
|
void *rv = malloc(size);
|
|
|
|
if (rv == NULL) {
|
2015-07-06 03:20:31 +00:00
|
|
|
(void) fprintf(stderr, "ERROR; failed to allocate %zu bytes\n",
|
|
|
|
size);
|
2014-11-03 20:15:08 +00:00
|
|
|
abort();
|
|
|
|
}
|
|
|
|
return (rv);
|
|
|
|
}
|
|
|
|
|
2012-08-29 19:23:12 +00:00
|
|
|
/*
|
|
|
|
* ssread - send stream read.
|
|
|
|
*
|
|
|
|
* Read while computing incremental checksum
|
|
|
|
*/
|
|
|
|
static size_t
|
|
|
|
ssread(void *buf, size_t len, zio_cksum_t *cksum)
|
|
|
|
{
|
|
|
|
size_t outlen;
|
|
|
|
|
|
|
|
if ((outlen = fread(buf, len, 1, send_stream)) == 0)
|
|
|
|
return (0);
|
|
|
|
|
2015-07-06 03:20:31 +00:00
|
|
|
if (do_cksum) {
|
2012-08-29 19:23:12 +00:00
|
|
|
if (do_byteswap)
|
|
|
|
fletcher_4_incremental_byteswap(buf, len, cksum);
|
|
|
|
else
|
|
|
|
fletcher_4_incremental_native(buf, len, cksum);
|
|
|
|
}
|
|
|
|
total_stream_len += len;
|
|
|
|
return (outlen);
|
|
|
|
}
|
|
|
|
|
2015-07-06 03:20:31 +00:00
|
|
|
static size_t
|
|
|
|
read_hdr(dmu_replay_record_t *drr, zio_cksum_t *cksum)
|
|
|
|
{
|
|
|
|
ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
|
|
|
|
==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
|
|
|
|
size_t r = ssread(drr, sizeof (*drr) - sizeof (zio_cksum_t), cksum);
|
|
|
|
if (r == 0)
|
|
|
|
return (0);
|
|
|
|
zio_cksum_t saved_cksum = *cksum;
|
|
|
|
r = ssread(&drr->drr_u.drr_checksum.drr_checksum,
|
|
|
|
sizeof (zio_cksum_t), cksum);
|
|
|
|
if (r == 0)
|
|
|
|
return (0);
|
2020-02-13 19:24:57 +00:00
|
|
|
if (do_cksum &&
|
|
|
|
!ZIO_CHECKSUM_IS_ZERO(&drr->drr_u.drr_checksum.drr_checksum) &&
|
2015-07-06 03:20:31 +00:00
|
|
|
!ZIO_CHECKSUM_EQUAL(saved_cksum,
|
|
|
|
drr->drr_u.drr_checksum.drr_checksum)) {
|
|
|
|
fprintf(stderr, "invalid checksum\n");
|
|
|
|
(void) printf("Incorrect checksum in record header.\n");
|
|
|
|
(void) printf("Expected checksum = %llx/%llx/%llx/%llx\n",
|
|
|
|
(longlong_t)saved_cksum.zc_word[0],
|
|
|
|
(longlong_t)saved_cksum.zc_word[1],
|
|
|
|
(longlong_t)saved_cksum.zc_word[2],
|
|
|
|
(longlong_t)saved_cksum.zc_word[3]);
|
2016-01-06 21:22:48 +00:00
|
|
|
return (0);
|
2015-07-06 03:20:31 +00:00
|
|
|
}
|
|
|
|
return (sizeof (*drr));
|
|
|
|
}
|
|
|
|
|
2014-11-03 19:44:19 +00:00
|
|
|
/*
|
|
|
|
* Print part of a block in ASCII characters
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
print_ascii_block(char *subbuf, int length)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < length; i++) {
|
|
|
|
char char_print = isprint(subbuf[i]) ? subbuf[i] : '.';
|
|
|
|
if (i != 0 && i % DUMP_GROUPING == 0) {
|
|
|
|
(void) printf(" ");
|
|
|
|
}
|
|
|
|
(void) printf("%c", char_print);
|
|
|
|
}
|
|
|
|
(void) printf("\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* print_block - Dump the contents of a modified block to STDOUT
|
|
|
|
*
|
|
|
|
* Assume that buf has capacity evenly divisible by BYTES_PER_LINE
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
print_block(char *buf, int length)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
/*
|
|
|
|
* Start printing ASCII characters at a constant offset, after
|
|
|
|
* the hex prints. Leave 3 characters per byte on a line (2 digit
|
|
|
|
* hex number plus 1 space) plus spaces between characters and
|
2015-07-06 03:20:31 +00:00
|
|
|
* groupings.
|
2014-11-03 19:44:19 +00:00
|
|
|
*/
|
|
|
|
int ascii_start = BYTES_PER_LINE * 3 +
|
|
|
|
BYTES_PER_LINE / DUMP_GROUPING + 2;
|
|
|
|
|
|
|
|
for (i = 0; i < length; i += BYTES_PER_LINE) {
|
|
|
|
int j;
|
|
|
|
int this_line_length = MIN(BYTES_PER_LINE, length - i);
|
|
|
|
int print_offset = 0;
|
|
|
|
|
|
|
|
for (j = 0; j < this_line_length; j++) {
|
|
|
|
int buf_offset = i + j;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Separate every DUMP_GROUPING bytes by a space.
|
|
|
|
*/
|
|
|
|
if (buf_offset % DUMP_GROUPING == 0) {
|
|
|
|
print_offset += printf(" ");
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Print the two-digit hex value for this byte.
|
|
|
|
*/
|
|
|
|
unsigned char hex_print = buf[buf_offset];
|
|
|
|
print_offset += printf("%02x ", hex_print);
|
|
|
|
}
|
|
|
|
|
|
|
|
(void) printf("%*s", ascii_start - print_offset, " ");
|
|
|
|
|
|
|
|
print_ascii_block(buf + i, this_line_length);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decyrpt, and authenticate protected datasets.
Each object set maintains a Merkel tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 17:36:48 +00:00
|
|
|
/*
|
2019-08-30 16:43:30 +00:00
|
|
|
* Print an array of bytes to stdout as hexadecimal characters. str must
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decyrpt, and authenticate protected datasets.
Each object set maintains a Merkel tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 17:36:48 +00:00
|
|
|
* have buf_len * 2 + 1 bytes of space.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
sprintf_bytes(char *str, uint8_t *buf, uint_t buf_len)
|
|
|
|
{
|
|
|
|
int i, n;
|
|
|
|
|
|
|
|
for (i = 0; i < buf_len; i++) {
|
|
|
|
n = sprintf(str, "%02x", buf[i] & 0xff);
|
|
|
|
str += n;
|
|
|
|
}
|
|
|
|
|
|
|
|
str[0] = '\0';
|
|
|
|
}
|
|
|
|
|
2012-08-29 19:23:12 +00:00
|
|
|
int
|
Add `zstream redup` command to convert deduplicated send streams
Deduplicated send and receive is deprecated. To ease migration to the
new dedup-send-less world, the commit adds a `zstream redup` utility to
convert deduplicated send streams to normal streams, so that they can
continue to be received indefinitely.
The new `zstream` command also replaces the functionality of
`zstreamdump`, by way of the `zstream dump` subcommand. The
`zstreamdump` command is replaced by a shell script which invokes
`zstream dump`.
The way that `zstream redup` works under the hood is that as we read the
send stream, we build up a hash table which maps from `<GUID, object,
offset> -> <file_offset>`.
Whenever we see a WRITE record, we add a new entry to the hash table,
which indicates where in the stream file to find the WRITE record for
this block. (The key is `drr_toguid, drr_object, drr_offset`.)
For entries other than WRITE_BYREF, we pass them through unchanged
(except for the running checksum, which is recalculated).
For WRITE_BYREF records, we change them to WRITE records. We find the
referenced WRITE record by looking in the hash table (for the record
with key `drr_refguid, drr_refobject, drr_refoffset`), and then reading
the record header and payload from the specified offset in the stream
file. This is why the stream can not be a pipe. The found WRITE record
replaces the WRITE_BYREF record, with its `drr_toguid`, `drr_object`,
and `drr_offset` fields changed to be the same as the WRITE_BYREF's
(i.e. we are writing the same logical block, but with the data supplied
by the previous WRITE record).
This algorithm requires memory proportional to the number of WRITE
records (same as `zfs send -D`), but the size per WRITE record is
relatively low (40 bytes, vs. 72 for `zfs send -D`). A 1TB send stream
with 8KB blocks (`recordsize=8k`) would use around 5GB of RAM to
"redup".
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Reviewed-by: Paul Dagnelie <pcd@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10124
Closes #10156
2020-04-10 17:39:55 +00:00
|
|
|
zstream_do_dump(int argc, char *argv[])
|
2012-08-29 19:23:12 +00:00
|
|
|
{
|
2014-11-03 20:15:08 +00:00
|
|
|
char *buf = safe_malloc(SPA_MAXBLOCKSIZE);
|
2014-06-05 21:19:08 +00:00
|
|
|
uint64_t drr_record_count[DRR_NUMTYPES] = { 0 };
|
2019-06-22 23:33:44 +00:00
|
|
|
uint64_t total_payload_size = 0;
|
|
|
|
uint64_t total_overhead_size = 0;
|
|
|
|
uint64_t drr_byte_count[DRR_NUMTYPES] = { 0 };
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decyrpt, and authenticate protected datasets.
Each object set maintains a Merkel tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 17:36:48 +00:00
|
|
|
char salt[ZIO_DATA_SALT_LEN * 2 + 1];
|
|
|
|
char iv[ZIO_DATA_IV_LEN * 2 + 1];
|
|
|
|
char mac[ZIO_DATA_MAC_LEN * 2 + 1];
|
2014-06-05 21:19:08 +00:00
|
|
|
uint64_t total_records = 0;
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decyrpt, and authenticate protected datasets.
Each object set maintains a Merkel tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 17:36:48 +00:00
|
|
|
uint64_t payload_size;
|
2012-08-29 19:23:12 +00:00
|
|
|
dmu_replay_record_t thedrr;
|
|
|
|
dmu_replay_record_t *drr = &thedrr;
|
|
|
|
struct drr_begin *drrb = &thedrr.drr_u.drr_begin;
|
|
|
|
struct drr_end *drre = &thedrr.drr_u.drr_end;
|
|
|
|
struct drr_object *drro = &thedrr.drr_u.drr_object;
|
|
|
|
struct drr_freeobjects *drrfo = &thedrr.drr_u.drr_freeobjects;
|
|
|
|
struct drr_write *drrw = &thedrr.drr_u.drr_write;
|
|
|
|
struct drr_write_byref *drrwbr = &thedrr.drr_u.drr_write_byref;
|
|
|
|
struct drr_free *drrf = &thedrr.drr_u.drr_free;
|
|
|
|
struct drr_spill *drrs = &thedrr.drr_u.drr_spill;
|
2014-06-05 21:19:08 +00:00
|
|
|
struct drr_write_embedded *drrwe = &thedrr.drr_u.drr_write_embedded;
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decyrpt, and authenticate protected datasets.
Each object set maintains a Merkel tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 17:36:48 +00:00
|
|
|
struct drr_object_range *drror = &thedrr.drr_u.drr_object_range;
|
Implement Redacted Send/Receive
Redacted send/receive allows users to send subsets of their data to
a target system. One possible use case for this feature is to not
transmit sensitive information to a data warehousing, test/dev, or
analytics environment. Another is to save space by not replicating
unimportant data within a given dataset, for example in backup tools
like zrepl.
Redacted send/receive is a three-stage process. First, a clone (or
clones) is made of the snapshot to be sent to the target. In this
clone (or clones), all unnecessary or unwanted data is removed or
modified. This clone is then snapshotted to create the "redaction
snapshot" (or snapshots). Second, the new zfs redact command is used
to create a redaction bookmark. The redaction bookmark stores the
list of blocks in a snapshot that were modified by the redaction
snapshot(s). Finally, the redaction bookmark is passed as a parameter
to zfs send. When sending to the snapshot that was redacted, the
redaction bookmark is used to filter out blocks that contain sensitive
or unwanted information, and those blocks are not included in the send
stream. When sending from the redaction bookmark, the blocks it
contains are considered as candidate blocks in addition to those
blocks in the destination snapshot that were modified since the
creation_txg of the redaction bookmark. This step is necessary to
allow the target to rehydrate data in the case where some blocks are
accidentally or unnecessarily modified in the redaction snapshot.
The changes to bookmarks to enable fast space estimation involve
adding deadlists to bookmarks. There is also logic to manage the
life cycles of these deadlists.
The new size estimation process operates in cases where previously
an accurate estimate could not be provided. In those cases, a send
is performed where no data blocks are read, reducing the runtime
significantly and providing a byte-accurate size estimate.
Reviewed-by: Dan Kimmel <dan.kimmel@delphix.com>
Reviewed-by: Matt Ahrens <mahrens@delphix.com>
Reviewed-by: Prashanth Sreenivasa <pks@delphix.com>
Reviewed-by: John Kennedy <john.kennedy@delphix.com>
Reviewed-by: George Wilson <george.wilson@delphix.com>
Reviewed-by: Chris Williamson <chris.williamson@delphix.com>
Reviewed-by: Pavel Zhakarov <pavel.zakharov@delphix.com>
Reviewed-by: Sebastien Roy <sebastien.roy@delphix.com>
Reviewed-by: Prakash Surya <prakash.surya@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Paul Dagnelie <pcd@delphix.com>
Closes #7958
2019-06-19 16:48:13 +00:00
|
|
|
struct drr_redact *drrr = &thedrr.drr_u.drr_redact;
|
2015-07-06 03:20:31 +00:00
|
|
|
struct drr_checksum *drrc = &thedrr.drr_u.drr_checksum;
|
2019-05-28 18:14:58 +00:00
|
|
|
int c;
|
2012-08-29 19:23:12 +00:00
|
|
|
boolean_t verbose = B_FALSE;
|
2015-07-06 03:20:31 +00:00
|
|
|
boolean_t very_verbose = B_FALSE;
|
2012-08-29 19:23:12 +00:00
|
|
|
boolean_t first = B_TRUE;
|
2014-11-03 19:44:19 +00:00
|
|
|
/*
|
|
|
|
* dump flag controls whether the contents of any modified data blocks
|
|
|
|
* are printed to the console during processing of the stream. Warning:
|
|
|
|
* for large streams, this can obviously lead to massive prints.
|
|
|
|
*/
|
|
|
|
boolean_t dump = B_FALSE;
|
2012-08-29 19:23:12 +00:00
|
|
|
int err;
|
|
|
|
zio_cksum_t zc = { { 0 } };
|
|
|
|
zio_cksum_t pcksum = { { 0 } };
|
|
|
|
|
2014-11-03 19:44:19 +00:00
|
|
|
while ((c = getopt(argc, argv, ":vCd")) != -1) {
|
2012-08-29 19:23:12 +00:00
|
|
|
switch (c) {
|
|
|
|
case 'C':
|
|
|
|
do_cksum = B_FALSE;
|
|
|
|
break;
|
|
|
|
case 'v':
|
2015-07-06 03:20:31 +00:00
|
|
|
if (verbose)
|
|
|
|
very_verbose = B_TRUE;
|
2012-08-29 19:23:12 +00:00
|
|
|
verbose = B_TRUE;
|
|
|
|
break;
|
2014-11-03 19:44:19 +00:00
|
|
|
case 'd':
|
|
|
|
dump = B_TRUE;
|
|
|
|
verbose = B_TRUE;
|
2015-07-06 03:20:31 +00:00
|
|
|
very_verbose = B_TRUE;
|
2014-11-03 19:44:19 +00:00
|
|
|
break;
|
2012-08-29 19:23:12 +00:00
|
|
|
case ':':
|
|
|
|
(void) fprintf(stderr,
|
|
|
|
"missing argument for '%c' option\n", optopt);
|
Add `zstream redup` command to convert deduplicated send streams
Deduplicated send and receive is deprecated. To ease migration to the
new dedup-send-less world, the commit adds a `zstream redup` utility to
convert deduplicated send streams to normal streams, so that they can
continue to be received indefinitely.
The new `zstream` command also replaces the functionality of
`zstreamdump`, by way of the `zstream dump` subcommand. The
`zstreamdump` command is replaced by a shell script which invokes
`zstream dump`.
The way that `zstream redup` works under the hood is that as we read the
send stream, we build up a hash table which maps from `<GUID, object,
offset> -> <file_offset>`.
Whenever we see a WRITE record, we add a new entry to the hash table,
which indicates where in the stream file to find the WRITE record for
this block. (The key is `drr_toguid, drr_object, drr_offset`.)
For entries other than WRITE_BYREF, we pass them through unchanged
(except for the running checksum, which is recalculated).
For WRITE_BYREF records, we change them to WRITE records. We find the
referenced WRITE record by looking in the hash table (for the record
with key `drr_refguid, drr_refobject, drr_refoffset`), and then reading
the record header and payload from the specified offset in the stream
file. This is why the stream can not be a pipe. The found WRITE record
replaces the WRITE_BYREF record, with its `drr_toguid`, `drr_object`,
and `drr_offset` fields changed to be the same as the WRITE_BYREF's
(i.e. we are writing the same logical block, but with the data supplied
by the previous WRITE record).
This algorithm requires memory proportional to the number of WRITE
records (same as `zfs send -D`), but the size per WRITE record is
relatively low (40 bytes, vs. 72 for `zfs send -D`). A 1TB send stream
with 8KB blocks (`recordsize=8k`) would use around 5GB of RAM to
"redup".
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Reviewed-by: Paul Dagnelie <pcd@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10124
Closes #10156
2020-04-10 17:39:55 +00:00
|
|
|
zstream_usage();
|
2012-08-29 19:23:12 +00:00
|
|
|
break;
|
|
|
|
case '?':
|
|
|
|
(void) fprintf(stderr, "invalid option '%c'\n",
|
|
|
|
optopt);
|
Add `zstream redup` command to convert deduplicated send streams
Deduplicated send and receive is deprecated. To ease migration to the
new dedup-send-less world, the commit adds a `zstream redup` utility to
convert deduplicated send streams to normal streams, so that they can
continue to be received indefinitely.
The new `zstream` command also replaces the functionality of
`zstreamdump`, by way of the `zstream dump` subcommand. The
`zstreamdump` command is replaced by a shell script which invokes
`zstream dump`.
The way that `zstream redup` works under the hood is that as we read the
send stream, we build up a hash table which maps from `<GUID, object,
offset> -> <file_offset>`.
Whenever we see a WRITE record, we add a new entry to the hash table,
which indicates where in the stream file to find the WRITE record for
this block. (The key is `drr_toguid, drr_object, drr_offset`.)
For entries other than WRITE_BYREF, we pass them through unchanged
(except for the running checksum, which is recalculated).
For WRITE_BYREF records, we change them to WRITE records. We find the
referenced WRITE record by looking in the hash table (for the record
with key `drr_refguid, drr_refobject, drr_refoffset`), and then reading
the record header and payload from the specified offset in the stream
file. This is why the stream can not be a pipe. The found WRITE record
replaces the WRITE_BYREF record, with its `drr_toguid`, `drr_object`,
and `drr_offset` fields changed to be the same as the WRITE_BYREF's
(i.e. we are writing the same logical block, but with the data supplied
by the previous WRITE record).
This algorithm requires memory proportional to the number of WRITE
records (same as `zfs send -D`), but the size per WRITE record is
relatively low (40 bytes, vs. 72 for `zfs send -D`). A 1TB send stream
with 8KB blocks (`recordsize=8k`) would use around 5GB of RAM to
"redup".
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Reviewed-by: Paul Dagnelie <pcd@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10124
Closes #10156
2020-04-10 17:39:55 +00:00
|
|
|
zstream_usage();
|
2016-07-11 17:45:52 +00:00
|
|
|
break;
|
2012-08-29 19:23:12 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Add `zstream redup` command to convert deduplicated send streams
Deduplicated send and receive is deprecated. To ease migration to the
new dedup-send-less world, the commit adds a `zstream redup` utility to
convert deduplicated send streams to normal streams, so that they can
continue to be received indefinitely.
The new `zstream` command also replaces the functionality of
`zstreamdump`, by way of the `zstream dump` subcommand. The
`zstreamdump` command is replaced by a shell script which invokes
`zstream dump`.
The way that `zstream redup` works under the hood is that as we read the
send stream, we build up a hash table which maps from `<GUID, object,
offset> -> <file_offset>`.
Whenever we see a WRITE record, we add a new entry to the hash table,
which indicates where in the stream file to find the WRITE record for
this block. (The key is `drr_toguid, drr_object, drr_offset`.)
For entries other than WRITE_BYREF, we pass them through unchanged
(except for the running checksum, which is recalculated).
For WRITE_BYREF records, we change them to WRITE records. We find the
referenced WRITE record by looking in the hash table (for the record
with key `drr_refguid, drr_refobject, drr_refoffset`), and then reading
the record header and payload from the specified offset in the stream
file. This is why the stream can not be a pipe. The found WRITE record
replaces the WRITE_BYREF record, with its `drr_toguid`, `drr_object`,
and `drr_offset` fields changed to be the same as the WRITE_BYREF's
(i.e. we are writing the same logical block, but with the data supplied
by the previous WRITE record).
This algorithm requires memory proportional to the number of WRITE
records (same as `zfs send -D`), but the size per WRITE record is
relatively low (40 bytes, vs. 72 for `zfs send -D`). A 1TB send stream
with 8KB blocks (`recordsize=8k`) would use around 5GB of RAM to
"redup".
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Reviewed-by: Paul Dagnelie <pcd@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Matthew Ahrens <mahrens@delphix.com>
Closes #10124
Closes #10156
2020-04-10 17:39:55 +00:00
|
|
|
if (argc > optind) {
|
|
|
|
const char *filename = argv[optind];
|
|
|
|
send_stream = fopen(filename, "r");
|
|
|
|
if (send_stream == NULL) {
|
|
|
|
(void) fprintf(stderr,
|
|
|
|
"Error while opening file '%s': %s\n",
|
|
|
|
filename, strerror(errno));
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (isatty(STDIN_FILENO)) {
|
|
|
|
(void) fprintf(stderr,
|
|
|
|
"Error: The send stream is a binary format "
|
|
|
|
"and can not be read from a\n"
|
|
|
|
"terminal. Standard input must be redirected, "
|
|
|
|
"or a file must be\n"
|
|
|
|
"specified as a command-line argument.\n");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
send_stream = stdin;
|
2012-08-29 19:23:12 +00:00
|
|
|
}
|
|
|
|
|
2016-11-29 21:47:05 +00:00
|
|
|
fletcher_4_init();
|
2015-07-06 03:20:31 +00:00
|
|
|
while (read_hdr(drr, &zc)) {
|
2012-08-29 19:23:12 +00:00
|
|
|
|
2014-11-03 19:44:19 +00:00
|
|
|
/*
|
|
|
|
* If this is the first DMU record being processed, check for
|
|
|
|
* the magic bytes and figure out the endian-ness based on them.
|
|
|
|
*/
|
2012-08-29 19:23:12 +00:00
|
|
|
if (first) {
|
|
|
|
if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
|
|
|
|
do_byteswap = B_TRUE;
|
|
|
|
if (do_cksum) {
|
|
|
|
ZIO_SET_CHECKSUM(&zc, 0, 0, 0, 0);
|
|
|
|
/*
|
|
|
|
* recalculate header checksum now
|
|
|
|
* that we know it needs to be
|
|
|
|
* byteswapped.
|
|
|
|
*/
|
|
|
|
fletcher_4_incremental_byteswap(drr,
|
|
|
|
sizeof (dmu_replay_record_t), &zc);
|
|
|
|
}
|
|
|
|
} else if (drrb->drr_magic != DMU_BACKUP_MAGIC) {
|
|
|
|
(void) fprintf(stderr, "Invalid stream "
|
|
|
|
"(bad magic number)\n");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
first = B_FALSE;
|
|
|
|
}
|
|
|
|
if (do_byteswap) {
|
|
|
|
drr->drr_type = BSWAP_32(drr->drr_type);
|
|
|
|
drr->drr_payloadlen =
|
|
|
|
BSWAP_32(drr->drr_payloadlen);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* At this point, the leading fields of the replay record
|
|
|
|
* (drr_type and drr_payloadlen) have been byte-swapped if
|
|
|
|
* necessary, but the rest of the data structure (the
|
|
|
|
* union of type-specific structures) is still in its
|
|
|
|
* original state.
|
|
|
|
*/
|
|
|
|
if (drr->drr_type >= DRR_NUMTYPES) {
|
|
|
|
(void) printf("INVALID record found: type 0x%x\n",
|
|
|
|
drr->drr_type);
|
|
|
|
(void) printf("Aborting.\n");
|
|
|
|
exit(1);
|
|
|
|
}
|
|
|
|
|
|
|
|
drr_record_count[drr->drr_type]++;
|
2019-06-22 23:33:44 +00:00
|
|
|
total_overhead_size += sizeof (*drr);
|
2014-06-05 21:19:08 +00:00
|
|
|
total_records++;
|
2019-06-22 23:33:44 +00:00
|
|
|
payload_size = 0;
|
2012-08-29 19:23:12 +00:00
|
|
|
|
|
|
|
switch (drr->drr_type) {
|
|
|
|
case DRR_BEGIN:
|
|
|
|
if (do_byteswap) {
|
|
|
|
drrb->drr_magic = BSWAP_64(drrb->drr_magic);
|
|
|
|
drrb->drr_versioninfo =
|
|
|
|
BSWAP_64(drrb->drr_versioninfo);
|
|
|
|
drrb->drr_creation_time =
|
|
|
|
BSWAP_64(drrb->drr_creation_time);
|
|
|
|
drrb->drr_type = BSWAP_32(drrb->drr_type);
|
|
|
|
drrb->drr_flags = BSWAP_32(drrb->drr_flags);
|
|
|
|
drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
|
|
|
|
drrb->drr_fromguid =
|
|
|
|
BSWAP_64(drrb->drr_fromguid);
|
|
|
|
}
|
|
|
|
|
|
|
|
(void) printf("BEGIN record\n");
|
|
|
|
(void) printf("\thdrtype = %lld\n",
|
|
|
|
DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo));
|
|
|
|
(void) printf("\tfeatures = %llx\n",
|
|
|
|
DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo));
|
|
|
|
(void) printf("\tmagic = %llx\n",
|
|
|
|
(u_longlong_t)drrb->drr_magic);
|
|
|
|
(void) printf("\tcreation_time = %llx\n",
|
|
|
|
(u_longlong_t)drrb->drr_creation_time);
|
|
|
|
(void) printf("\ttype = %u\n", drrb->drr_type);
|
|
|
|
(void) printf("\tflags = 0x%x\n", drrb->drr_flags);
|
|
|
|
(void) printf("\ttoguid = %llx\n",
|
|
|
|
(u_longlong_t)drrb->drr_toguid);
|
|
|
|
(void) printf("\tfromguid = %llx\n",
|
|
|
|
(u_longlong_t)drrb->drr_fromguid);
|
|
|
|
(void) printf("\ttoname = %s\n", drrb->drr_toname);
|
2020-06-27 17:29:47 +00:00
|
|
|
(void) printf("\tpayloadlen = %u\n",
|
|
|
|
drr->drr_payloadlen);
|
2012-08-29 19:23:12 +00:00
|
|
|
if (verbose)
|
|
|
|
(void) printf("\n");
|
|
|
|
|
2016-01-06 21:22:48 +00:00
|
|
|
if (drr->drr_payloadlen != 0) {
|
2012-08-29 19:23:12 +00:00
|
|
|
nvlist_t *nv;
|
|
|
|
int sz = drr->drr_payloadlen;
|
|
|
|
|
2014-11-03 20:15:08 +00:00
|
|
|
if (sz > SPA_MAXBLOCKSIZE) {
|
2012-08-29 19:23:12 +00:00
|
|
|
free(buf);
|
2014-11-03 20:15:08 +00:00
|
|
|
buf = safe_malloc(sz);
|
2012-08-29 19:23:12 +00:00
|
|
|
}
|
|
|
|
(void) ssread(buf, sz, &zc);
|
|
|
|
if (ferror(send_stream))
|
|
|
|
perror("fread");
|
|
|
|
err = nvlist_unpack(buf, sz, &nv, 0);
|
2018-09-18 16:43:09 +00:00
|
|
|
if (err) {
|
2012-08-29 19:23:12 +00:00
|
|
|
perror(strerror(err));
|
2018-09-18 16:43:09 +00:00
|
|
|
} else {
|
|
|
|
nvlist_print(stdout, nv);
|
|
|
|
nvlist_free(nv);
|
|
|
|
}
|
2019-06-22 23:33:44 +00:00
|
|
|
payload_size = sz;
|
2012-08-29 19:23:12 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DRR_END:
|
|
|
|
if (do_byteswap) {
|
|
|
|
drre->drr_checksum.zc_word[0] =
|
|
|
|
BSWAP_64(drre->drr_checksum.zc_word[0]);
|
|
|
|
drre->drr_checksum.zc_word[1] =
|
|
|
|
BSWAP_64(drre->drr_checksum.zc_word[1]);
|
|
|
|
drre->drr_checksum.zc_word[2] =
|
|
|
|
BSWAP_64(drre->drr_checksum.zc_word[2]);
|
|
|
|
drre->drr_checksum.zc_word[3] =
|
|
|
|
BSWAP_64(drre->drr_checksum.zc_word[3]);
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* We compare against the *previous* checksum
|
|
|
|
* value, because the stored checksum is of
|
|
|
|
* everything before the DRR_END record.
|
|
|
|
*/
|
|
|
|
if (do_cksum && !ZIO_CHECKSUM_EQUAL(drre->drr_checksum,
|
|
|
|
pcksum)) {
|
|
|
|
(void) printf("Expected checksum differs from "
|
|
|
|
"checksum in stream.\n");
|
|
|
|
(void) printf("Expected checksum = "
|
|
|
|
"%llx/%llx/%llx/%llx\n",
|
|
|
|
(long long unsigned int)pcksum.zc_word[0],
|
|
|
|
(long long unsigned int)pcksum.zc_word[1],
|
|
|
|
(long long unsigned int)pcksum.zc_word[2],
|
|
|
|
(long long unsigned int)pcksum.zc_word[3]);
|
|
|
|
}
|
|
|
|
(void) printf("END checksum = %llx/%llx/%llx/%llx\n",
|
|
|
|
(long long unsigned int)
|
|
|
|
drre->drr_checksum.zc_word[0],
|
|
|
|
(long long unsigned int)
|
|
|
|
drre->drr_checksum.zc_word[1],
|
|
|
|
(long long unsigned int)
|
|
|
|
drre->drr_checksum.zc_word[2],
|
|
|
|
(long long unsigned int)
|
|
|
|
drre->drr_checksum.zc_word[3]);
|
|
|
|
|
|
|
|
ZIO_SET_CHECKSUM(&zc, 0, 0, 0, 0);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DRR_OBJECT:
|
|
|
|
if (do_byteswap) {
|
|
|
|
drro->drr_object = BSWAP_64(drro->drr_object);
|
|
|
|
drro->drr_type = BSWAP_32(drro->drr_type);
|
|
|
|
drro->drr_bonustype =
|
|
|
|
BSWAP_32(drro->drr_bonustype);
|
|
|
|
drro->drr_blksz = BSWAP_32(drro->drr_blksz);
|
|
|
|
drro->drr_bonuslen =
|
|
|
|
BSWAP_32(drro->drr_bonuslen);
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decyrpt, and authenticate protected datasets.
Each object set maintains a Merkel tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 17:36:48 +00:00
|
|
|
drro->drr_raw_bonuslen =
|
|
|
|
BSWAP_32(drro->drr_raw_bonuslen);
|
2012-08-29 19:23:12 +00:00
|
|
|
drro->drr_toguid = BSWAP_64(drro->drr_toguid);
|
2017-11-08 19:12:59 +00:00
|
|
|
drro->drr_maxblkid =
|
|
|
|
BSWAP_64(drro->drr_maxblkid);
|
2012-08-29 19:23:12 +00:00
|
|
|
}
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decyrpt, and authenticate protected datasets.
Each object set maintains a Merkel tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 17:36:48 +00:00
|
|
|
|
2022-02-03 22:28:19 +00:00
|
|
|
if (drro->drr_bonuslen > drro->drr_raw_bonuslen) {
|
|
|
|
(void) fprintf(stderr,
|
|
|
|
"Warning: Object %llu has bonuslen = "
|
|
|
|
"%u > raw_bonuslen = %u\n\n",
|
|
|
|
(u_longlong_t)drro->drr_object,
|
|
|
|
drro->drr_bonuslen, drro->drr_raw_bonuslen);
|
|
|
|
}
|
|
|
|
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decyrpt, and authenticate protected datasets.
Each object set maintains a Merkel tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 17:36:48 +00:00
|
|
|
payload_size = DRR_OBJECT_PAYLOAD_SIZE(drro);
|
|
|
|
|
2012-08-29 19:23:12 +00:00
|
|
|
if (verbose) {
|
|
|
|
(void) printf("OBJECT object = %llu type = %u "
|
2017-07-26 01:52:40 +00:00
|
|
|
"bonustype = %u blksz = %u bonuslen = %u "
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decyrpt, and authenticate protected datasets.
Each object set maintains a Merkel tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 17:36:48 +00:00
|
|
|
"dn_slots = %u raw_bonuslen = %u "
|
2017-11-08 19:12:59 +00:00
|
|
|
"flags = %u maxblkid = %llu "
|
|
|
|
"indblkshift = %u nlevels = %u "
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decyrpt, and authenticate protected datasets.
Each object set maintains a Merkel tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 17:36:48 +00:00
|
|
|
"nblkptr = %u\n",
|
2012-08-29 19:23:12 +00:00
|
|
|
(u_longlong_t)drro->drr_object,
|
|
|
|
drro->drr_type,
|
|
|
|
drro->drr_bonustype,
|
|
|
|
drro->drr_blksz,
|
2017-07-26 01:52:40 +00:00
|
|
|
drro->drr_bonuslen,
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decyrpt, and authenticate protected datasets.
Each object set maintains a Merkel tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 17:36:48 +00:00
|
|
|
drro->drr_dn_slots,
|
|
|
|
drro->drr_raw_bonuslen,
|
|
|
|
drro->drr_flags,
|
2017-11-08 19:12:59 +00:00
|
|
|
(u_longlong_t)drro->drr_maxblkid,
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decyrpt, and authenticate protected datasets.
Each object set maintains a Merkel tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 17:36:48 +00:00
|
|
|
drro->drr_indblkshift,
|
|
|
|
drro->drr_nlevels,
|
|
|
|
drro->drr_nblkptr);
|
2012-08-29 19:23:12 +00:00
|
|
|
}
|
|
|
|
if (drro->drr_bonuslen > 0) {
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decyrpt, and authenticate protected datasets.
Each object set maintains a Merkel tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 17:36:48 +00:00
|
|
|
(void) ssread(buf, payload_size, &zc);
|
|
|
|
if (dump)
|
|
|
|
print_block(buf, payload_size);
|
2012-08-29 19:23:12 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DRR_FREEOBJECTS:
|
|
|
|
if (do_byteswap) {
|
|
|
|
drrfo->drr_firstobj =
|
|
|
|
BSWAP_64(drrfo->drr_firstobj);
|
|
|
|
drrfo->drr_numobjs =
|
|
|
|
BSWAP_64(drrfo->drr_numobjs);
|
|
|
|
drrfo->drr_toguid = BSWAP_64(drrfo->drr_toguid);
|
|
|
|
}
|
|
|
|
if (verbose) {
|
|
|
|
(void) printf("FREEOBJECTS firstobj = %llu "
|
|
|
|
"numobjs = %llu\n",
|
|
|
|
(u_longlong_t)drrfo->drr_firstobj,
|
|
|
|
(u_longlong_t)drrfo->drr_numobjs);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DRR_WRITE:
|
|
|
|
if (do_byteswap) {
|
|
|
|
drrw->drr_object = BSWAP_64(drrw->drr_object);
|
|
|
|
drrw->drr_type = BSWAP_32(drrw->drr_type);
|
|
|
|
drrw->drr_offset = BSWAP_64(drrw->drr_offset);
|
2016-07-11 17:45:52 +00:00
|
|
|
drrw->drr_logical_size =
|
|
|
|
BSWAP_64(drrw->drr_logical_size);
|
2012-08-29 19:23:12 +00:00
|
|
|
drrw->drr_toguid = BSWAP_64(drrw->drr_toguid);
|
|
|
|
drrw->drr_key.ddk_prop =
|
|
|
|
BSWAP_64(drrw->drr_key.ddk_prop);
|
2016-07-11 17:45:52 +00:00
|
|
|
drrw->drr_compressed_size =
|
|
|
|
BSWAP_64(drrw->drr_compressed_size);
|
2012-08-29 19:23:12 +00:00
|
|
|
}
|
2016-07-11 17:45:52 +00:00
|
|
|
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decyrpt, and authenticate protected datasets.
Each object set maintains a Merkel tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 17:36:48 +00:00
|
|
|
payload_size = DRR_WRITE_PAYLOAD_SIZE(drrw);
|
2016-07-11 17:45:52 +00:00
|
|
|
|
2014-11-03 19:44:19 +00:00
|
|
|
/*
|
|
|
|
* If this is verbose and/or dump output,
|
|
|
|
* print info on the modified block
|
|
|
|
*/
|
2012-08-29 19:23:12 +00:00
|
|
|
if (verbose) {
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decyrpt, and authenticate protected datasets.
Each object set maintains a Merkel tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 17:36:48 +00:00
|
|
|
sprintf_bytes(salt, drrw->drr_salt,
|
|
|
|
ZIO_DATA_SALT_LEN);
|
|
|
|
sprintf_bytes(iv, drrw->drr_iv,
|
|
|
|
ZIO_DATA_IV_LEN);
|
|
|
|
sprintf_bytes(mac, drrw->drr_mac,
|
|
|
|
ZIO_DATA_MAC_LEN);
|
|
|
|
|
2012-08-29 19:23:12 +00:00
|
|
|
(void) printf("WRITE object = %llu type = %u "
|
2019-03-13 18:19:23 +00:00
|
|
|
"checksum type = %u compression type = %u "
|
|
|
|
"flags = %u offset = %llu "
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decyrpt, and authenticate protected datasets.
Each object set maintains a Merkel tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 17:36:48 +00:00
|
|
|
"logical_size = %llu "
|
2016-07-11 17:45:52 +00:00
|
|
|
"compressed_size = %llu "
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decyrpt, and authenticate protected datasets.
Each object set maintains a Merkel tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 17:36:48 +00:00
|
|
|
"payload_size = %llu props = %llx "
|
|
|
|
"salt = %s iv = %s mac = %s\n",
|
2012-08-29 19:23:12 +00:00
|
|
|
(u_longlong_t)drrw->drr_object,
|
|
|
|
drrw->drr_type,
|
|
|
|
drrw->drr_checksumtype,
|
2016-07-11 17:45:52 +00:00
|
|
|
drrw->drr_compressiontype,
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decyrpt, and authenticate protected datasets.
Each object set maintains a Merkel tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 17:36:48 +00:00
|
|
|
drrw->drr_flags,
|
2012-08-29 19:23:12 +00:00
|
|
|
(u_longlong_t)drrw->drr_offset,
|
2016-07-11 17:45:52 +00:00
|
|
|
(u_longlong_t)drrw->drr_logical_size,
|
|
|
|
(u_longlong_t)drrw->drr_compressed_size,
|
|
|
|
(u_longlong_t)payload_size,
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decyrpt, and authenticate protected datasets.
Each object set maintains a Merkel tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 17:36:48 +00:00
|
|
|
(u_longlong_t)drrw->drr_key.ddk_prop,
|
|
|
|
salt,
|
|
|
|
iv,
|
|
|
|
mac);
|
2012-08-29 19:23:12 +00:00
|
|
|
}
|
2016-07-11 17:45:52 +00:00
|
|
|
|
2014-11-03 19:44:19 +00:00
|
|
|
/*
|
|
|
|
* Read the contents of the block in from STDIN to buf
|
|
|
|
*/
|
2016-07-11 17:45:52 +00:00
|
|
|
(void) ssread(buf, payload_size, &zc);
|
2014-11-03 19:44:19 +00:00
|
|
|
/*
|
|
|
|
* If in dump mode
|
|
|
|
*/
|
|
|
|
if (dump) {
|
2016-07-11 17:45:52 +00:00
|
|
|
print_block(buf, payload_size);
|
2014-11-03 19:44:19 +00:00
|
|
|
}
|
2012-08-29 19:23:12 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
case DRR_WRITE_BYREF:
|
|
|
|
if (do_byteswap) {
|
|
|
|
drrwbr->drr_object =
|
|
|
|
BSWAP_64(drrwbr->drr_object);
|
|
|
|
drrwbr->drr_offset =
|
|
|
|
BSWAP_64(drrwbr->drr_offset);
|
|
|
|
drrwbr->drr_length =
|
|
|
|
BSWAP_64(drrwbr->drr_length);
|
|
|
|
drrwbr->drr_toguid =
|
|
|
|
BSWAP_64(drrwbr->drr_toguid);
|
|
|
|
drrwbr->drr_refguid =
|
|
|
|
BSWAP_64(drrwbr->drr_refguid);
|
|
|
|
drrwbr->drr_refobject =
|
|
|
|
BSWAP_64(drrwbr->drr_refobject);
|
|
|
|
drrwbr->drr_refoffset =
|
|
|
|
BSWAP_64(drrwbr->drr_refoffset);
|
|
|
|
drrwbr->drr_key.ddk_prop =
|
|
|
|
BSWAP_64(drrwbr->drr_key.ddk_prop);
|
|
|
|
}
|
|
|
|
if (verbose) {
|
|
|
|
(void) printf("WRITE_BYREF object = %llu "
|
2019-03-13 18:19:23 +00:00
|
|
|
"checksum type = %u props = %llx "
|
|
|
|
"offset = %llu length = %llu "
|
|
|
|
"toguid = %llx refguid = %llx "
|
|
|
|
"refobject = %llu refoffset = %llu\n",
|
2012-08-29 19:23:12 +00:00
|
|
|
(u_longlong_t)drrwbr->drr_object,
|
|
|
|
drrwbr->drr_checksumtype,
|
|
|
|
(u_longlong_t)drrwbr->drr_key.ddk_prop,
|
|
|
|
(u_longlong_t)drrwbr->drr_offset,
|
|
|
|
(u_longlong_t)drrwbr->drr_length,
|
|
|
|
(u_longlong_t)drrwbr->drr_toguid,
|
|
|
|
(u_longlong_t)drrwbr->drr_refguid,
|
|
|
|
(u_longlong_t)drrwbr->drr_refobject,
|
|
|
|
(u_longlong_t)drrwbr->drr_refoffset);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case DRR_FREE:
|
|
|
|
if (do_byteswap) {
|
|
|
|
drrf->drr_object = BSWAP_64(drrf->drr_object);
|
|
|
|
drrf->drr_offset = BSWAP_64(drrf->drr_offset);
|
|
|
|
drrf->drr_length = BSWAP_64(drrf->drr_length);
|
|
|
|
}
|
|
|
|
if (verbose) {
|
|
|
|
(void) printf("FREE object = %llu "
|
|
|
|
"offset = %llu length = %lld\n",
|
|
|
|
(u_longlong_t)drrf->drr_object,
|
|
|
|
(u_longlong_t)drrf->drr_offset,
|
|
|
|
(longlong_t)drrf->drr_length);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case DRR_SPILL:
|
|
|
|
if (do_byteswap) {
|
|
|
|
drrs->drr_object = BSWAP_64(drrs->drr_object);
|
|
|
|
drrs->drr_length = BSWAP_64(drrs->drr_length);
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decyrpt, and authenticate protected datasets.
Each object set maintains a Merkel tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 17:36:48 +00:00
|
|
|
drrs->drr_compressed_size =
|
|
|
|
BSWAP_64(drrs->drr_compressed_size);
|
|
|
|
drrs->drr_type = BSWAP_32(drrs->drr_type);
|
2012-08-29 19:23:12 +00:00
|
|
|
}
|
2018-04-17 18:19:03 +00:00
|
|
|
|
|
|
|
payload_size = DRR_SPILL_PAYLOAD_SIZE(drrs);
|
|
|
|
|
2012-08-29 19:23:12 +00:00
|
|
|
if (verbose) {
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decyrpt, and authenticate protected datasets.
Each object set maintains a Merkel tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 17:36:48 +00:00
|
|
|
sprintf_bytes(salt, drrs->drr_salt,
|
|
|
|
ZIO_DATA_SALT_LEN);
|
|
|
|
sprintf_bytes(iv, drrs->drr_iv,
|
|
|
|
ZIO_DATA_IV_LEN);
|
|
|
|
sprintf_bytes(mac, drrs->drr_mac,
|
|
|
|
ZIO_DATA_MAC_LEN);
|
|
|
|
|
2012-08-29 19:23:12 +00:00
|
|
|
(void) printf("SPILL block for object = %llu "
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decyrpt, and authenticate protected datasets.
Each object set maintains a Merkel tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 17:36:48 +00:00
|
|
|
"length = %llu flags = %u "
|
|
|
|
"compression type = %u "
|
|
|
|
"compressed_size = %llu "
|
2018-04-17 18:19:03 +00:00
|
|
|
"payload_size = %llu "
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decyrpt, and authenticate protected datasets.
Each object set maintains a Merkel tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 17:36:48 +00:00
|
|
|
"salt = %s iv = %s mac = %s\n",
|
|
|
|
(u_longlong_t)drrs->drr_object,
|
|
|
|
(u_longlong_t)drrs->drr_length,
|
|
|
|
drrs->drr_flags,
|
|
|
|
drrs->drr_compressiontype,
|
|
|
|
(u_longlong_t)drrs->drr_compressed_size,
|
2018-04-17 18:19:03 +00:00
|
|
|
(u_longlong_t)payload_size,
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decyrpt, and authenticate protected datasets.
Each object set maintains a Merkel tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 17:36:48 +00:00
|
|
|
salt,
|
|
|
|
iv,
|
|
|
|
mac);
|
2012-08-29 19:23:12 +00:00
|
|
|
}
|
2018-04-17 18:19:03 +00:00
|
|
|
(void) ssread(buf, payload_size, &zc);
|
2014-11-03 19:44:19 +00:00
|
|
|
if (dump) {
|
2018-04-17 18:19:03 +00:00
|
|
|
print_block(buf, payload_size);
|
2014-11-03 19:44:19 +00:00
|
|
|
}
|
2012-08-29 19:23:12 +00:00
|
|
|
break;
|
2014-06-05 21:19:08 +00:00
|
|
|
case DRR_WRITE_EMBEDDED:
|
|
|
|
if (do_byteswap) {
|
|
|
|
drrwe->drr_object =
|
|
|
|
BSWAP_64(drrwe->drr_object);
|
|
|
|
drrwe->drr_offset =
|
|
|
|
BSWAP_64(drrwe->drr_offset);
|
|
|
|
drrwe->drr_length =
|
|
|
|
BSWAP_64(drrwe->drr_length);
|
|
|
|
drrwe->drr_toguid =
|
|
|
|
BSWAP_64(drrwe->drr_toguid);
|
|
|
|
drrwe->drr_lsize =
|
|
|
|
BSWAP_32(drrwe->drr_lsize);
|
|
|
|
drrwe->drr_psize =
|
|
|
|
BSWAP_32(drrwe->drr_psize);
|
|
|
|
}
|
|
|
|
if (verbose) {
|
|
|
|
(void) printf("WRITE_EMBEDDED object = %llu "
|
2019-03-13 18:19:23 +00:00
|
|
|
"offset = %llu length = %llu "
|
|
|
|
"toguid = %llx comp = %u etype = %u "
|
2014-06-05 21:19:08 +00:00
|
|
|
"lsize = %u psize = %u\n",
|
|
|
|
(u_longlong_t)drrwe->drr_object,
|
|
|
|
(u_longlong_t)drrwe->drr_offset,
|
|
|
|
(u_longlong_t)drrwe->drr_length,
|
|
|
|
(u_longlong_t)drrwe->drr_toguid,
|
|
|
|
drrwe->drr_compression,
|
|
|
|
drrwe->drr_etype,
|
|
|
|
drrwe->drr_lsize,
|
|
|
|
drrwe->drr_psize);
|
|
|
|
}
|
|
|
|
(void) ssread(buf,
|
|
|
|
P2ROUNDUP(drrwe->drr_psize, 8), &zc);
|
2019-02-28 01:55:25 +00:00
|
|
|
if (dump) {
|
|
|
|
print_block(buf,
|
|
|
|
P2ROUNDUP(drrwe->drr_psize, 8));
|
|
|
|
}
|
2019-06-22 23:33:44 +00:00
|
|
|
payload_size = P2ROUNDUP(drrwe->drr_psize, 8);
|
2014-06-05 21:19:08 +00:00
|
|
|
break;
|
Native Encryption for ZFS on Linux
This change incorporates three major pieces:
The first change is a keystore that manages wrapping
and encryption keys for encrypted datasets. These
commands mostly involve manipulating the new
DSL Crypto Key ZAP Objects that live in the MOS. Each
encrypted dataset has its own DSL Crypto Key that is
protected with a user's key. This level of indirection
allows users to change their keys without re-encrypting
their entire datasets. The change implements the new
subcommands "zfs load-key", "zfs unload-key" and
"zfs change-key" which allow the user to manage their
encryption keys and settings. In addition, several new
flags and properties have been added to allow dataset
creation and to make mounting and unmounting more
convenient.
The second piece of this patch provides the ability to
encrypt, decyrpt, and authenticate protected datasets.
Each object set maintains a Merkel tree of Message
Authentication Codes that protect the lower layers,
similarly to how checksums are maintained. This part
impacts the zio layer, which handles the actual
encryption and generation of MACs, as well as the ARC
and DMU, which need to be able to handle encrypted
buffers and protected data.
The last addition is the ability to do raw, encrypted
sends and receives. The idea here is to send raw
encrypted and compressed data and receive it exactly
as is on a backup system. This means that the dataset
on the receiving system is protected using the same
user key that is in use on the sending side. By doing
so, datasets can be efficiently backed up to an
untrusted system without fear of data being
compromised.
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #494
Closes #5769
2017-08-14 17:36:48 +00:00
|
|
|
case DRR_OBJECT_RANGE:
|
|
|
|
if (do_byteswap) {
|
|
|
|
drror->drr_firstobj =
|
|
|
|
BSWAP_64(drror->drr_firstobj);
|
|
|
|
drror->drr_numslots =
|
|
|
|
BSWAP_64(drror->drr_numslots);
|
|
|
|
drror->drr_toguid = BSWAP_64(drror->drr_toguid);
|
|
|
|
}
|
|
|
|
if (verbose) {
|
|
|
|
sprintf_bytes(salt, drror->drr_salt,
|
|
|
|
ZIO_DATA_SALT_LEN);
|
|
|
|
sprintf_bytes(iv, drror->drr_iv,
|
|
|
|
ZIO_DATA_IV_LEN);
|
|
|
|
sprintf_bytes(mac, drror->drr_mac,
|
|
|
|
ZIO_DATA_MAC_LEN);
|
|
|
|
|
|
|
|
(void) printf("OBJECT_RANGE firstobj = %llu "
|
|
|
|
"numslots = %llu flags = %u "
|
|
|
|
"salt = %s iv = %s mac = %s\n",
|
|
|
|
(u_longlong_t)drror->drr_firstobj,
|
|
|
|
(u_longlong_t)drror->drr_numslots,
|
|
|
|
drror->drr_flags,
|
|
|
|
salt,
|
|
|
|
iv,
|
|
|
|
mac);
|
|
|
|
}
|
|
|
|
break;
|
Implement Redacted Send/Receive
Redacted send/receive allows users to send subsets of their data to
a target system. One possible use case for this feature is to not
transmit sensitive information to a data warehousing, test/dev, or
analytics environment. Another is to save space by not replicating
unimportant data within a given dataset, for example in backup tools
like zrepl.
Redacted send/receive is a three-stage process. First, a clone (or
clones) is made of the snapshot to be sent to the target. In this
clone (or clones), all unnecessary or unwanted data is removed or
modified. This clone is then snapshotted to create the "redaction
snapshot" (or snapshots). Second, the new zfs redact command is used
to create a redaction bookmark. The redaction bookmark stores the
list of blocks in a snapshot that were modified by the redaction
snapshot(s). Finally, the redaction bookmark is passed as a parameter
to zfs send. When sending to the snapshot that was redacted, the
redaction bookmark is used to filter out blocks that contain sensitive
or unwanted information, and those blocks are not included in the send
stream. When sending from the redaction bookmark, the blocks it
contains are considered as candidate blocks in addition to those
blocks in the destination snapshot that were modified since the
creation_txg of the redaction bookmark. This step is necessary to
allow the target to rehydrate data in the case where some blocks are
accidentally or unnecessarily modified in the redaction snapshot.
The changes to bookmarks to enable fast space estimation involve
adding deadlists to bookmarks. There is also logic to manage the
life cycles of these deadlists.
The new size estimation process operates in cases where previously
an accurate estimate could not be provided. In those cases, a send
is performed where no data blocks are read, reducing the runtime
significantly and providing a byte-accurate size estimate.
Reviewed-by: Dan Kimmel <dan.kimmel@delphix.com>
Reviewed-by: Matt Ahrens <mahrens@delphix.com>
Reviewed-by: Prashanth Sreenivasa <pks@delphix.com>
Reviewed-by: John Kennedy <john.kennedy@delphix.com>
Reviewed-by: George Wilson <george.wilson@delphix.com>
Reviewed-by: Chris Williamson <chris.williamson@delphix.com>
Reviewed-by: Pavel Zhakarov <pavel.zakharov@delphix.com>
Reviewed-by: Sebastien Roy <sebastien.roy@delphix.com>
Reviewed-by: Prakash Surya <prakash.surya@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Paul Dagnelie <pcd@delphix.com>
Closes #7958
2019-06-19 16:48:13 +00:00
|
|
|
case DRR_REDACT:
|
|
|
|
if (do_byteswap) {
|
|
|
|
drrr->drr_object = BSWAP_64(drrr->drr_object);
|
|
|
|
drrr->drr_offset = BSWAP_64(drrr->drr_offset);
|
|
|
|
drrr->drr_length = BSWAP_64(drrr->drr_length);
|
|
|
|
drrr->drr_toguid = BSWAP_64(drrr->drr_toguid);
|
|
|
|
}
|
|
|
|
if (verbose) {
|
|
|
|
(void) printf("REDACT object = %llu offset = "
|
|
|
|
"%llu length = %llu\n",
|
|
|
|
(u_longlong_t)drrr->drr_object,
|
|
|
|
(u_longlong_t)drrr->drr_offset,
|
|
|
|
(u_longlong_t)drrr->drr_length);
|
|
|
|
}
|
|
|
|
break;
|
2012-08-29 19:23:12 +00:00
|
|
|
case DRR_NUMTYPES:
|
|
|
|
/* should never be reached */
|
|
|
|
exit(1);
|
|
|
|
}
|
2015-07-06 03:20:31 +00:00
|
|
|
if (drr->drr_type != DRR_BEGIN && very_verbose) {
|
|
|
|
(void) printf(" checksum = %llx/%llx/%llx/%llx\n",
|
|
|
|
(longlong_t)drrc->drr_checksum.zc_word[0],
|
|
|
|
(longlong_t)drrc->drr_checksum.zc_word[1],
|
|
|
|
(longlong_t)drrc->drr_checksum.zc_word[2],
|
|
|
|
(longlong_t)drrc->drr_checksum.zc_word[3]);
|
|
|
|
}
|
2012-08-29 19:23:12 +00:00
|
|
|
pcksum = zc;
|
2019-06-22 23:33:44 +00:00
|
|
|
drr_byte_count[drr->drr_type] += payload_size;
|
|
|
|
total_payload_size += payload_size;
|
2012-08-29 19:23:12 +00:00
|
|
|
}
|
|
|
|
free(buf);
|
2016-11-29 21:47:05 +00:00
|
|
|
fletcher_4_fini();
|
2012-08-29 19:23:12 +00:00
|
|
|
|
|
|
|
/* Print final summary */
|
|
|
|
|
|
|
|
(void) printf("SUMMARY:\n");
|
2019-06-22 23:33:44 +00:00
|
|
|
(void) printf("\tTotal DRR_BEGIN records = %lld (%llu bytes)\n",
|
|
|
|
(u_longlong_t)drr_record_count[DRR_BEGIN],
|
|
|
|
(u_longlong_t)drr_byte_count[DRR_BEGIN]);
|
|
|
|
(void) printf("\tTotal DRR_END records = %lld (%llu bytes)\n",
|
|
|
|
(u_longlong_t)drr_record_count[DRR_END],
|
|
|
|
(u_longlong_t)drr_byte_count[DRR_END]);
|
|
|
|
(void) printf("\tTotal DRR_OBJECT records = %lld (%llu bytes)\n",
|
|
|
|
(u_longlong_t)drr_record_count[DRR_OBJECT],
|
|
|
|
(u_longlong_t)drr_byte_count[DRR_OBJECT]);
|
|
|
|
(void) printf("\tTotal DRR_FREEOBJECTS records = %lld (%llu bytes)\n",
|
|
|
|
(u_longlong_t)drr_record_count[DRR_FREEOBJECTS],
|
|
|
|
(u_longlong_t)drr_byte_count[DRR_FREEOBJECTS]);
|
|
|
|
(void) printf("\tTotal DRR_WRITE records = %lld (%llu bytes)\n",
|
|
|
|
(u_longlong_t)drr_record_count[DRR_WRITE],
|
|
|
|
(u_longlong_t)drr_byte_count[DRR_WRITE]);
|
|
|
|
(void) printf("\tTotal DRR_WRITE_BYREF records = %lld (%llu bytes)\n",
|
|
|
|
(u_longlong_t)drr_record_count[DRR_WRITE_BYREF],
|
|
|
|
(u_longlong_t)drr_byte_count[DRR_WRITE_BYREF]);
|
|
|
|
(void) printf("\tTotal DRR_WRITE_EMBEDDED records = %lld (%llu "
|
|
|
|
"bytes)\n", (u_longlong_t)drr_record_count[DRR_WRITE_EMBEDDED],
|
|
|
|
(u_longlong_t)drr_byte_count[DRR_WRITE_EMBEDDED]);
|
|
|
|
(void) printf("\tTotal DRR_FREE records = %lld (%llu bytes)\n",
|
|
|
|
(u_longlong_t)drr_record_count[DRR_FREE],
|
|
|
|
(u_longlong_t)drr_byte_count[DRR_FREE]);
|
|
|
|
(void) printf("\tTotal DRR_SPILL records = %lld (%llu bytes)\n",
|
|
|
|
(u_longlong_t)drr_record_count[DRR_SPILL],
|
|
|
|
(u_longlong_t)drr_byte_count[DRR_SPILL]);
|
2012-08-29 19:23:12 +00:00
|
|
|
(void) printf("\tTotal records = %lld\n",
|
2014-06-05 21:19:08 +00:00
|
|
|
(u_longlong_t)total_records);
|
2019-06-22 23:33:44 +00:00
|
|
|
(void) printf("\tTotal payload size = %lld (0x%llx)\n",
|
|
|
|
(u_longlong_t)total_payload_size, (u_longlong_t)total_payload_size);
|
|
|
|
(void) printf("\tTotal header overhead = %lld (0x%llx)\n",
|
|
|
|
(u_longlong_t)total_overhead_size,
|
|
|
|
(u_longlong_t)total_overhead_size);
|
2012-08-29 19:23:12 +00:00
|
|
|
(void) printf("\tTotal stream length = %lld (0x%llx)\n",
|
|
|
|
(u_longlong_t)total_stream_len, (u_longlong_t)total_stream_len);
|
|
|
|
return (0);
|
|
|
|
}
|