OpenZFS 7252 - compressed zfs send / receive

OpenZFS 7252 - compressed zfs send / receive
OpenZFS 7628 - create long versions of ZFS send / receive options

Authored by: Dan Kimmel <dan.kimmel@delphix.com>
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: John Kennedy <john.kennedy@delphix.com>
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: Paul Dagnelie <pcd@delphix.com>
Reviewed by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed by: Sebastien Roy <sebastien.roy@delphix.com>
Reviewed by: David Quigley <dpquigl@davequigley.com>
Reviewed by: Thomas Caputi <tcaputi@datto.com>
Approved by: Dan McDonald <danmcd@omniti.com>
Reviewed by: David Quigley <dpquigl@davequigley.com>
Reviewed-by: loli10K <ezomori.nozomu@gmail.com>
Ported-by: bunder2015 <omfgbunder@gmail.com>
Ported-by: Don Brady <don.brady@intel.com>
Ported-by: Brian Behlendorf <behlendorf1@llnl.gov>

Porting Notes:
- Most of 7252 was already picked up during ABD work.  This
  commit represents the gap from the final commit to openzfs.
- Fixed split_large_blocks check in do_dump()
- An alternate version of the write_compressible() function was
  implemented for Linux which does not depend on fio.  The behavior
  of fio differs significantly based on the exact version.
- mkholes was replaced with truncate for Linux.

OpenZFS-issue: https://www.illumos.org/issues/7252
OpenZFS-commit: https://github.com/openzfs/openzfs/commit/5602294
Closes #6067
This commit is contained in:
Dan Kimmel 2017-04-11 21:56:54 +00:00 committed by Brian Behlendorf
parent 7a25f0891e
commit a7004725d0
41 changed files with 1668 additions and 104 deletions

View File

@ -32,6 +32,7 @@
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <getopt.h>
#include <libgen.h>
#include <libintl.h>
#include <libuutil.h>
@ -3771,8 +3772,23 @@ zfs_do_send(int argc, char **argv)
nvlist_t *dbgnv = NULL;
boolean_t extraverbose = B_FALSE;
struct option long_options[] = {
{"replicate", no_argument, NULL, 'R'},
{"props", no_argument, NULL, 'p'},
{"parsable", no_argument, NULL, 'P'},
{"dedup", no_argument, NULL, 'D'},
{"verbose", no_argument, NULL, 'v'},
{"dryrun", no_argument, NULL, 'n'},
{"large-block", no_argument, NULL, 'L'},
{"embed", no_argument, NULL, 'e'},
{"resume", required_argument, NULL, 't'},
{"compressed", no_argument, NULL, 'c'},
{0, 0, 0, 0}
};
/* check options */
while ((c = getopt(argc, argv, ":i:I:RDpvnPLet:c")) != -1) {
while ((c = getopt_long(argc, argv, ":i:I:RDpvnPLet:c", long_options,
NULL)) != -1) {
switch (c) {
case 'i':
if (fromname)

View File

@ -430,8 +430,8 @@ main(int argc, char *argv[])
drro->drr_bonuslen);
}
if (drro->drr_bonuslen > 0) {
(void) ssread(buf, P2ROUNDUP(drro->drr_bonuslen,
8), &zc);
(void) ssread(buf,
P2ROUNDUP(drro->drr_bonuslen, 8), &zc);
if (dump) {
print_block(buf,
P2ROUNDUP(drro->drr_bonuslen, 8));

View File

@ -493,10 +493,10 @@ lzc_send_resume(const char *snapname, const char *from, int fd,
fnvlist_add_string(args, "fromsnap", from);
if (flags & LZC_SEND_FLAG_LARGE_BLOCK)
fnvlist_add_boolean(args, "largeblockok");
if (flags & LZC_SEND_FLAG_COMPRESS)
fnvlist_add_boolean(args, "compressok");
if (flags & LZC_SEND_FLAG_EMBED_DATA)
fnvlist_add_boolean(args, "embedok");
if (flags & LZC_SEND_FLAG_COMPRESS)
fnvlist_add_boolean(args, "compressok");
if (resumeobj != 0 || resumeoff != 0) {
fnvlist_add_uint64(args, "resume_object", resumeobj);
fnvlist_add_uint64(args, "resume_offset", resumeoff);

View File

@ -2755,7 +2755,7 @@ Generate a stream package that sends all intermediary snapshots from the first s
.sp
.ne 2
.na
\fB\fB-R\fR\fR
\fB\fB-R\fR, \fB--replicate\fR\fR
.ad
.sp .6
.RS 4n
@ -2767,7 +2767,7 @@ If the \fB-i\fR or \fB-I\fR flags are used in conjunction with the \fB-R\fR flag
.sp
.ne 2
.na
\fB\fB-D\fR\fR
\fB\fB-D\fR, \fB--dedup\fR\fR
.ad
.sp .6
.RS 4n
@ -2777,7 +2777,7 @@ Generate a deduplicated stream. Blocks which would have been sent multiple times
.sp
.ne 2
.na
\fB\fB-L\fR\fR
\fB\fB-L\fR, \fB--large-block\fR\fR
.ad
.sp .6
.RS 4n
@ -2792,7 +2792,7 @@ flags and the \fBlarge_blocks\fR feature.
.sp
.ne 2
.na
\fB\fB-e\fR\fR
\fB\fB-e\fR, \fB--embed\fR\fR
.ad
.sp .6
.RS 4n
@ -2825,7 +2825,7 @@ decompressed before sending so it can be split into smaller block sizes.
.sp
.ne 2
.na
\fB\fB-p\fR\fR
\fB\fB-p\fR, \fB--props\fR\fR
.ad
.sp .6
.RS 4n
@ -2835,7 +2835,7 @@ Include the dataset's properties in the stream. This flag is implicit when -R i
.sp
.ne 2
.na
\fB\fB-n\fR\fR
\fB\fB-n\fR, \fB--dryrun\fR\fR
.ad
.sp .6
.RS 4n
@ -2849,7 +2849,7 @@ to standard output and the verbose output goes to standard error).
.sp
.ne 2
.na
\fB\fB-P\fR\fR
\fB\fB-P\fR, \fB--parsable\fR\fR
.ad
.sp .6
.RS 4n
@ -2859,7 +2859,7 @@ Print machine-parsable verbose information about the stream package generated.
.sp
.ne 2
.na
\fB\fB-v\fR\fR
\fB\fB-v\fR, \fB--verbose\fR\fR
.ad
.sp .6
.RS 4n
@ -2887,7 +2887,7 @@ name will be "--head--".
.sp
.ne 2
.na
\fB\fB-L\fR\fR
\fB\fB-L\fR, \fB--large-block\fR\fR
.ad
.sp .6
.RS 4n
@ -2902,7 +2902,7 @@ flags and the \fBlarge_blocks\fR feature.
.sp
.ne 2
.na
\fB\fB-e\fR\fR
\fB\fB-e\fR, \fB--embed\fR\fR
.ad
.sp .6
.RS 4n

View File

@ -1350,6 +1350,10 @@ arc_buf_is_shared(arc_buf_t *buf)
return (shared);
}
/*
* Free the checksum associated with this header. If there is no checksum, this
* is a no-op.
*/
static inline void
arc_cksum_free(arc_buf_hdr_t *hdr)
{
@ -1362,6 +1366,21 @@ arc_cksum_free(arc_buf_hdr_t *hdr)
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
}
/*
* Return true iff at least one of the bufs on hdr is not compressed.
*/
static boolean_t
arc_hdr_has_uncompressed_buf(arc_buf_hdr_t *hdr)
{
	arc_buf_t *b = hdr->b_l1hdr.b_buf;

	/* Walk the buf list; stop at the first uncompressed buf. */
	while (b != NULL) {
		if (!ARC_BUF_COMPRESSED(b))
			return (B_TRUE);
		b = b->b_next;
	}

	return (B_FALSE);
}
/*
* If we've turned on the ZFS_DEBUG_MODIFY flag, verify that the buf's data
* matches the checksum that is stored in the hdr. If there is no checksum,
@ -1377,6 +1396,8 @@ arc_cksum_verify(arc_buf_t *buf)
return;
if (ARC_BUF_COMPRESSED(buf)) {
ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL ||
arc_hdr_has_uncompressed_buf(hdr));
return;
}
@ -1483,6 +1504,7 @@ arc_cksum_compute(arc_buf_t *buf)
mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock);
if (hdr->b_l1hdr.b_freeze_cksum != NULL) {
ASSERT(arc_hdr_has_uncompressed_buf(hdr));
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
return;
} else if (ARC_BUF_COMPRESSED(buf)) {
@ -1513,7 +1535,7 @@ arc_buf_unwatch(arc_buf_t *buf)
{
#ifndef _KERNEL
if (arc_watch) {
ASSERT0(mprotect(buf->b_data, HDR_GET_LSIZE(buf->b_hdr),
ASSERT0(mprotect(buf->b_data, arc_buf_size(buf),
PROT_READ | PROT_WRITE));
}
#endif
@ -1580,6 +1602,8 @@ arc_buf_thaw(arc_buf_t *buf)
* allocate b_thawed.
*/
if (ARC_BUF_COMPRESSED(buf)) {
ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL ||
arc_hdr_has_uncompressed_buf(hdr));
return;
}
@ -1598,6 +1622,8 @@ arc_buf_freeze(arc_buf_t *buf)
return;
if (ARC_BUF_COMPRESSED(buf)) {
ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL ||
arc_hdr_has_uncompressed_buf(hdr));
return;
}
@ -1671,14 +1697,13 @@ static boolean_t
arc_buf_try_copy_decompressed_data(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
arc_buf_t *from;
boolean_t copied = B_FALSE;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3P(buf->b_data, !=, NULL);
ASSERT(!ARC_BUF_COMPRESSED(buf));
for (from = hdr->b_l1hdr.b_buf; from != NULL;
for (arc_buf_t *from = hdr->b_l1hdr.b_buf; from != NULL;
from = from->b_next) {
/* can't use our own data buffer */
if (from == buf) {
@ -2294,7 +2319,6 @@ arc_space_return(uint64_t space, arc_space_type_t type)
static boolean_t
arc_can_share(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
boolean_t hdr_compressed, buf_compressed;
/*
* The criteria for sharing a hdr's data are:
* 1. the hdr's compression matches the buf's compression
@ -2317,8 +2341,8 @@ arc_can_share(arc_buf_hdr_t *hdr, arc_buf_t *buf)
* sharing if the new buf isn't the first to be added.
*/
ASSERT3P(buf->b_hdr, ==, hdr);
hdr_compressed = HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF;
buf_compressed = ARC_BUF_COMPRESSED(buf) != 0;
boolean_t hdr_compressed = HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF;
boolean_t buf_compressed = ARC_BUF_COMPRESSED(buf) != 0;
return (buf_compressed == hdr_compressed &&
hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS &&
!HDR_SHARED_DATA(hdr) &&
@ -2335,7 +2359,6 @@ arc_buf_alloc_impl(arc_buf_hdr_t *hdr, void *tag, boolean_t compressed,
boolean_t fill, arc_buf_t **ret)
{
arc_buf_t *buf;
boolean_t can_share;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
@ -2371,14 +2394,6 @@ arc_buf_alloc_impl(arc_buf_hdr_t *hdr, void *tag, boolean_t compressed,
if (compressed && HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF)
buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
/*
* Although the ARC should handle it correctly, levels above the ARC
* should prevent us from having multiple compressed bufs off the same
* hdr. To ensure we notice it if this behavior changes, we assert this
* here the best we can.
*/
IMPLY(ARC_BUF_COMPRESSED(buf), !HDR_SHARED_DATA(hdr));
/*
* If the hdr's data can be shared then we share the data buffer and
* set the appropriate bit in the hdr's b_flags to indicate the hdr is
@ -2392,7 +2407,7 @@ arc_buf_alloc_impl(arc_buf_hdr_t *hdr, void *tag, boolean_t compressed,
* Second, the hdr's ABD must be linear so that the buf's user doesn't
* need to be ABD-aware.
*/
can_share = arc_can_share(hdr, buf) && !HDR_L2_WRITING(hdr) &&
boolean_t can_share = arc_can_share(hdr, buf) && !HDR_L2_WRITING(hdr) &&
abd_is_linear(hdr->b_l1hdr.b_pabd);
/* Set up b_data and sharing */
@ -2423,6 +2438,15 @@ arc_buf_alloc_impl(arc_buf_hdr_t *hdr, void *tag, boolean_t compressed,
static char *arc_onloan_tag = "onloan";
/*
 * Apply a signed adjustment to arc_loaned_bytes, the global count of
 * bytes held in loaned (anonymous, not-yet-returned) ARC buffers.
 * Callers pass a positive delta when loaning and a negative delta when
 * a loaned buffer is returned.
 */
static inline void
arc_loaned_bytes_update(int64_t delta)
{
	atomic_add_64(&arc_loaned_bytes, delta);

	/* assert that it did not wrap around */
	ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
}
/*
* Loan out an anonymous arc buffer. Loaned buffers are not counted as in
* flight data by arc_tempreserve_space() until they are "returned". Loaned
@ -2435,7 +2459,8 @@ arc_loan_buf(spa_t *spa, boolean_t is_metadata, int size)
arc_buf_t *buf = arc_alloc_buf(spa, arc_onloan_tag,
is_metadata ? ARC_BUFC_METADATA : ARC_BUFC_DATA, size);
atomic_add_64(&arc_loaned_bytes, size);
arc_loaned_bytes_update(size);
return (buf);
}
@ -2446,7 +2471,8 @@ arc_loan_compressed_buf(spa_t *spa, uint64_t psize, uint64_t lsize,
arc_buf_t *buf = arc_alloc_compressed_buf(spa, arc_onloan_tag,
psize, lsize, compression_type);
atomic_add_64(&arc_loaned_bytes, psize);
arc_loaned_bytes_update(psize);
return (buf);
}
@ -2464,7 +2490,7 @@ arc_return_buf(arc_buf_t *buf, void *tag)
(void) refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
(void) refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
atomic_add_64(&arc_loaned_bytes, -arc_buf_size(buf));
arc_loaned_bytes_update(-arc_buf_size(buf));
}
/* Detach an arc_buf from a dbuf (tag) */
@ -2478,7 +2504,7 @@ arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
(void) refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
(void) refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
atomic_add_64(&arc_loaned_bytes, -arc_buf_size(buf));
arc_loaned_bytes_update(arc_buf_size(buf));
}
static void
@ -2589,12 +2615,12 @@ arc_unshare_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
static arc_buf_t *
arc_buf_remove(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
arc_buf_t **bufp = &hdr->b_l1hdr.b_buf;
arc_buf_t *lastbuf = NULL;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
arc_buf_t **bufp = &hdr->b_l1hdr.b_buf;
arc_buf_t *lastbuf = NULL;
/*
* Remove the buf from the hdr list and locate the last
* remaining buffer on the list.
@ -2629,7 +2655,6 @@ arc_buf_remove(arc_buf_hdr_t *hdr, arc_buf_t *buf)
static void
arc_buf_destroy_impl(arc_buf_t *buf)
{
arc_buf_t *lastbuf;
arc_buf_hdr_t *hdr = buf->b_hdr;
/*
@ -2660,7 +2685,7 @@ arc_buf_destroy_impl(arc_buf_t *buf)
hdr->b_l1hdr.b_bufcnt -= 1;
}
lastbuf = arc_buf_remove(hdr, buf);
arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
if (ARC_BUF_SHARED(buf) && !ARC_BUF_COMPRESSED(buf)) {
/*
@ -2705,8 +2730,13 @@ arc_buf_destroy_impl(arc_buf_t *buf)
HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF);
}
if (hdr->b_l1hdr.b_bufcnt == 0)
/*
* Free the checksum if we're removing the last uncompressed buf from
* this hdr.
*/
if (!arc_hdr_has_uncompressed_buf(hdr)) {
arc_cksum_free(hdr);
}
/* clean up the buf */
buf->b_hdr = NULL;
@ -2895,12 +2925,11 @@ arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
arc_buf_t *
arc_alloc_buf(spa_t *spa, void *tag, arc_buf_contents_t type, int32_t size)
{
arc_buf_t *buf;
arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), size, size,
ZIO_COMPRESS_OFF, type);
ASSERT(!MUTEX_HELD(HDR_LOCK(hdr)));
buf = NULL;
arc_buf_t *buf = NULL;
VERIFY0(arc_buf_alloc_impl(hdr, tag, B_FALSE, B_FALSE, &buf));
arc_buf_thaw(buf);
@ -2915,18 +2944,16 @@ arc_buf_t *
arc_alloc_compressed_buf(spa_t *spa, void *tag, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type)
{
arc_buf_hdr_t *hdr;
arc_buf_t *buf;
ASSERT3U(lsize, >, 0);
ASSERT3U(lsize, >=, psize);
ASSERT(compression_type > ZIO_COMPRESS_OFF);
ASSERT(compression_type < ZIO_COMPRESS_FUNCTIONS);
hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
compression_type, ARC_BUFC_DATA);
ASSERT(!MUTEX_HELD(HDR_LOCK(hdr)));
buf = NULL;
arc_buf_t *buf = NULL;
VERIFY0(arc_buf_alloc_impl(hdr, tag, B_TRUE, B_FALSE, &buf));
arc_buf_thaw(buf);
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
@ -4888,7 +4915,7 @@ arc_read_done(zio_t *zio)
arc_callback_t *acb;
boolean_t freeable = B_FALSE;
boolean_t no_zio_error = (zio->io_error == 0);
int callback_cnt = 0;
/*
* The hdr was inserted into hash-table and removed from lists
* prior to starting I/O. We should find this header, since
@ -4951,16 +4978,15 @@ arc_read_done(zio_t *zio)
* passed in. The implementation of arc_buf_alloc_impl() ensures that we
* aren't needlessly decompressing the data multiple times.
*/
int callback_cnt = 0;
for (acb = callback_list; acb != NULL; acb = acb->acb_next) {
int error;
if (!acb->acb_done)
continue;
/* This is a demand read since prefetches don't use callbacks */
callback_cnt++;
error = arc_buf_alloc_impl(hdr, acb->acb_private,
int error = arc_buf_alloc_impl(hdr, acb->acb_private,
acb->acb_compressed, no_zio_error, &acb->acb_buf);
if (no_zio_error) {
zio->io_error = error;
@ -5119,6 +5145,7 @@ top:
KM_SLEEP);
acb->acb_done = done;
acb->acb_private = private;
acb->acb_compressed = compressed_read;
if (pio != NULL)
acb->acb_zio_dummy = zio_null(pio,
spa, NULL, NULL, NULL, zio_flags);
@ -5580,7 +5607,6 @@ arc_release(arc_buf_t *buf, void *tag)
uint64_t lsize = HDR_GET_LSIZE(hdr);
enum zio_compress compress = HDR_GET_COMPRESS(hdr);
arc_buf_contents_t type = arc_buf_type(hdr);
arc_buf_t *lastbuf = NULL;
VERIFY3U(hdr->b_type, ==, type);
ASSERT(hdr->b_l1hdr.b_buf != buf || buf->b_next != NULL);
@ -5596,7 +5622,7 @@ arc_release(arc_buf_t *buf, void *tag)
* a new anonymous hdr. Also find the last buffer
* in the hdr's buffer list.
*/
lastbuf = arc_buf_remove(hdr, buf);
arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
ASSERT3P(lastbuf, !=, NULL);
/*
@ -5609,9 +5635,7 @@ arc_release(arc_buf_t *buf, void *tag)
/*
* First, sever the block sharing relationship between
* buf and the arc_buf_hdr_t. Then, setup a new
* block sharing relationship with the last buffer
* on the arc_buf_t list.
* buf and the arc_buf_hdr_t.
*/
arc_unshare_buf(hdr, buf);
@ -5951,6 +5975,7 @@ arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
if (ARC_BUF_COMPRESSED(buf)) {
ASSERT3U(zp->zp_compress, !=, ZIO_COMPRESS_OFF);
ASSERT3U(HDR_GET_LSIZE(hdr), !=, arc_buf_size(buf));
zio_flags |= ZIO_FLAG_RAW;
}
callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
@ -6073,6 +6098,10 @@ arc_tempreserve_space(uint64_t reserve, uint64_t txg)
* network delays from blocking transactions that are ready to be
* assigned to a txg.
*/
/* assert that it has not wrapped around */
ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
anon_size = MAX((int64_t)(refcount_count(&arc_anon->arcs_size) -
arc_loaned_bytes), 0);

View File

@ -380,7 +380,6 @@ static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
dmu_buf_user_t *dbu = db->db_user;
boolean_t has_async;
ASSERT(MUTEX_HELD(&db->db_mtx));
@ -405,7 +404,7 @@ dbuf_evict_user(dmu_buf_impl_t *db)
* containing the dbu. In that case we need to take care to not
* dereference dbu after calling the sync evict func.
*/
has_async = (dbu->dbu_evict_func_async != NULL);
boolean_t has_async = (dbu->dbu_evict_func_async != NULL);
if (dbu->dbu_evict_func_sync != NULL)
dbu->dbu_evict_func_sync(dbu);
@ -3308,8 +3307,8 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
if (compress_type == ZIO_COMPRESS_OFF) {
*datap = arc_alloc_buf(os->os_spa, db, type, psize);
} else {
int lsize = arc_buf_lsize(*datap);
ASSERT3U(type, ==, ARC_BUFC_DATA);
int lsize = arc_buf_lsize(*datap);
*datap = arc_alloc_compressed_buf(os->os_spa, db,
psize, lsize, compress_type);
}

View File

@ -1889,6 +1889,10 @@ dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp,
boolean_t nopwrite = B_FALSE;
boolean_t dedup_verify = os->os_dedup_verify;
int copies = os->os_copies;
boolean_t lz4_ac = spa_feature_is_active(os->os_spa,
SPA_FEATURE_LZ4_COMPRESS);
IMPLY(override_compress == ZIO_COMPRESS_LZ4, lz4_ac);
/*
* We maintain different write policies for each of the following

View File

@ -699,15 +699,13 @@ do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
arc_buf_t *abuf;
int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
uint64_t offset;
enum zio_flag zioflags = ZIO_FLAG_CANFAIL;
/*
* If we have large blocks stored on disk but the send flags
* don't allow us to send large blocks, we split the data from
* the arc buf into chunks.
*/
boolean_t split_large_blocks =
data->datablkszsec > SPA_OLD_MAXBLOCKSIZE &&
boolean_t split_large_blocks = blksz > SPA_OLD_MAXBLOCKSIZE &&
!(dsa->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS);
/*
* We should only request compressed data from the ARC if all
@ -729,17 +727,19 @@ do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
(zb->zb_object == dsa->dsa_resume_object &&
zb->zb_blkid * blksz >= dsa->dsa_resume_offset));
ASSERT3U(blksz, ==, BP_GET_LSIZE(bp));
enum zio_flag zioflags = ZIO_FLAG_CANFAIL;
if (request_compressed)
zioflags |= ZIO_FLAG_RAW;
if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
ZIO_PRIORITY_ASYNC_READ, zioflags,
&aflags, zb) != 0) {
ZIO_PRIORITY_ASYNC_READ, zioflags, &aflags, zb) != 0) {
if (zfs_send_corrupt_data) {
uint64_t *ptr;
/* Send a block filled with 0x"zfs badd bloc" */
abuf = arc_alloc_buf(spa, &abuf, ARC_BUFC_DATA,
blksz);
uint64_t *ptr;
for (ptr = abuf->b_data;
(char *)ptr < (char *)abuf->b_data + blksz;
ptr++)
@ -752,9 +752,9 @@ do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
offset = zb->zb_blkid * blksz;
if (split_large_blocks) {
char *buf = abuf->b_data;
ASSERT3U(arc_get_compression(abuf), ==,
ZIO_COMPRESS_OFF);
char *buf = abuf->b_data;
while (blksz > 0 && err == 0) {
int n = MIN(blksz, SPA_OLD_MAXBLOCKSIZE);
err = dump_write(dsa, type, zb->zb_object,

View File

@ -87,7 +87,7 @@ lz4_decompress_zfs(void *s_start, void *d_start, size_t s_len,
/*
* Returns 0 on success (decompression function returned non-negative)
* and non-zero on failure (decompression function returned negative.
* and non-zero on failure (decompression function returned negative).
*/
return (LZ4_uncompress_unknownOutputSize(&src[sizeof (bufsiz)],
d_start, bufsiz, d_len) < 0);

View File

@ -568,15 +568,21 @@ tests = ['reservation_001_pos', 'reservation_002_pos', 'reservation_003_pos',
tests = ['rootpool_002_neg', 'rootpool_003_neg', 'rootpool_007_pos']
# DISABLED:
# rsend_008_pos - Fails for OpenZFS on illumos
# rsend_008_pos - https://github.com/zfsonlinux/zfs/issues/6066
# rsend_009_pos - https://github.com/zfsonlinux/zfs/issues/5887
# rsend_020_pos - ASSERTs in dump_record()
[tests/functional/rsend]
tests = ['rsend_001_pos', 'rsend_002_pos', 'rsend_003_pos', 'rsend_004_pos',
'rsend_005_pos', 'rsend_006_pos', 'rsend_007_pos',
'rsend_010_pos', 'rsend_011_pos', 'rsend_012_pos',
'rsend_013_pos', 'rsend_014_pos', 'rsend_019_pos',
'rsend_021_pos', 'rsend_022_pos', 'rsend_024_pos']
'rsend_013_pos', 'rsend_014_pos',
'rsend_019_pos', 'rsend_020_pos',
'rsend_021_pos', 'rsend_022_pos', 'rsend_024_pos',
'send-c_verify_ratio', 'send-c_verify_contents', 'send-c_props',
'send-c_incremental', 'send-c_volume', 'send-c_zstreamdump',
'send-c_lz4_disabled', 'send-c_recv_lz4_disabled',
'send-c_mixed_compression', 'send-c_stream_size_estimate', 'send-cD',
'send-c_embedded_blocks', 'send-c_resume', 'send-cpL_varied_recsize',
'send-c_recv_dedup']
[tests/functional/scrub_mirror]
tests = ['scrub_mirror_001_pos', 'scrub_mirror_002_pos',

View File

@ -134,7 +134,8 @@ export ZFS_FILES='zdb
arc_summary.py
arcstat.py
dbufstat.py
zed'
zed
zstreamdump'
export ZFSTEST_FILES='chg_usr_exec
devname2devid

View File

@ -29,6 +29,7 @@
#
. ${STF_TOOLS}/include/logapi.shlib
. ${STF_SUITE}/include/math.shlib
#
# Apply constrained path when available. This is required since the
@ -3111,6 +3112,73 @@ function get_min
echo $min
}
#
# Generate a random number between 1 and the argument.
#
function random
{
	typeset upper=$1

	# $RANDOM is a shell builtin; map it onto the range [1, upper].
	echo $((RANDOM % upper + 1))
}
# Write data that can be compressed into a directory
function write_compressible
{
	typeset dir=$1		# destination directory (must already exist)
	typeset megs=$2		# data per file; suffixed size, e.g. "16m"
	typeset nfiles=${3:-1}	# number of files to create
	typeset bs=${4:-1024k}	# write block size (fio path only)
	typeset fname=${5:-file}	# base name; files are $fname.<n>

	[[ -d $dir ]] || log_fail "No directory: $dir"

	# Under Linux fio is not currently used since its behavior can
	# differ significantly across versions.  This includes missing
	# command line options and cases where the --buffer_compress_*
	# options fail to behave as expected.
	if is_linux; then
		typeset file_bytes=$(to_bytes $megs)
		typeset bs_bytes=4096
		typeset blocks=$(($file_bytes / $bs_bytes))

		for (( i = 0; i < $nfiles; i++ )); do
			# Sparse file of the full size; holes compress away.
			truncate -s $file_bytes $dir/$fname.$i

			# Write every third block to get 66% compression.
			for (( j = 0; j < $blocks; j += 3 )); do
				dd if=/dev/urandom of=$dir/$fname.$i \
				    seek=$j bs=$bs_bytes count=1 \
				    conv=notrunc >/dev/null 2>&1
			done
		done
	else
		# fio generates the compressible data directly; \$jobnum is
		# escaped so fio (not the shell) expands it per job.
		log_must eval "fio \
		    --name=job \
		    --fallocate=0 \
		    --minimal \
		    --randrepeat=0 \
		    --buffer_compress_percentage=66 \
		    --buffer_compress_chunk=4096 \
		    --directory=$dir \
		    --numjobs=$nfiles \
		    --nrfiles=$nfiles \
		    --rw=write \
		    --bs=$bs \
		    --filesize=$megs \
		    --filename_format='$fname.\$jobnum' >/dev/null"
	fi
}
function get_objnum
{
	typeset target=$1

	# The inode number reported by stat(1) is the ZFS object number.
	[[ -e $target ]] || log_fail "No such file or directory: $target"
	echo $(stat -c %i $target)
}
#
# Synchronize all the data in pool
#

View File

@ -41,3 +41,28 @@ function within_percent
return 1
}
#
# Return 0 if the human readable string of the form <value>[suffix] can
# be converted to bytes. Allowed suffixes are shown in the table below.
#
function to_bytes
{
	typeset size=$1
	typeset value=$(echo "$size" | grep -o '[0-9]\+')
	typeset factor	# declare local: was implicitly leaking into callers

	case $size in
	*PB|*pb|*P|*p)	factor='1024^5' ;;
	*TB|*tb|*T|*t)	factor='1024^4' ;;
	*GB|*gb|*G|*g)	factor='1024^3' ;;
	*MB|*mb|*M|*m)	factor='1024^2' ;;
	*KB|*kb|*K|*k)	factor='1024^1' ;;
	*B|*b)		factor='1024^0' ;;
	*[!0-9.]*)	return 1 ;;	# unrecognized suffix
	*)		factor='1024^0' ;;	# bare number: already bytes
	esac

	# Use bc: the result may exceed the shell's native integer range.
	echo "$value * ($factor)" | bc
	return 0
}

View File

@ -13,11 +13,26 @@
# Copyright (c) 2012, 2016 by Delphix. All rights reserved.
#
typeset -a compress_props=('on' 'off' 'lzjb' 'gzip' 'gzip-1' 'gzip-2' 'gzip-3'
'gzip-4' 'gzip-5' 'gzip-6' 'gzip-7' 'gzip-8' 'gzip-9' 'zle')
typeset -a compress_prop_vals=('on' 'off' 'lzjb' 'gzip' 'gzip-1' 'gzip-2'
'gzip-3' 'gzip-4' 'gzip-5' 'gzip-6' 'gzip-7' 'gzip-8' 'gzip-9' 'zle' 'lz4')
typeset -a checksum_prop_vals=('on' 'off' 'fletcher2' 'fletcher4' 'sha256'
'noparity' 'sha512' 'skein' 'edonr')
typeset -a recsize_prop_vals=('512' '1024' '2048' '4096' '8192' '16384'
'32768' '65536' '131072' '262144' '524288' '1048576')
typeset -a canmount_prop_vals=('on' 'off' 'noauto')
typeset -a copies_prop_vals=('1' '2' '3')
typeset -a logbias_prop_vals=('latency' 'throughput')
typeset -a primarycache_prop_vals=('all' 'none' 'metadata')
typeset -a redundant_metadata_prop_vals=('all' 'most')
typeset -a secondarycache_prop_vals=('all' 'none' 'metadata')
typeset -a snapdir_prop_vals=('hidden' 'visible')
typeset -a sync_prop_vals=('standard' 'always' 'disabled')
typeset -a checksum_props=('on' 'off' 'fletcher2' 'fletcher4' 'sha256' 'sha512'
'edonr' 'skein' 'noparity')
typeset -a fs_props=('compress' 'checksum' 'recsize'
'canmount' 'copies' 'logbias' 'primarycache' 'redundant_metadata'
'secondarycache' 'snapdir' 'sync')
typeset -a vol_props=('compress' 'checksum' 'copies' 'logbias' 'primarycache'
'secondarycache' 'redundant_metadata' 'sync')
#
# Given the property array passed in, return 'num_props' elements to the
@ -45,20 +60,81 @@ function get_rand_prop
# Choose random compression values; offset 2 skips the generic
# 'on'/'off' entries of compress_prop_vals (see get_rand_prop).
function get_rand_compress
{
	get_rand_prop compress_prop_vals $1 2
}
# Choose random compression values; offset 0 means any entry of
# compress_prop_vals may be returned (see get_rand_prop).
function get_rand_compress_any
{
	get_rand_prop compress_prop_vals $1 0
}
# Choose random checksum values; offset 2 skips the generic
# 'on'/'off' entries of checksum_prop_vals (see get_rand_prop).
function get_rand_checksum
{
	get_rand_prop checksum_prop_vals $1 2
}
# Choose random checksum values; offset 0 means any entry of
# checksum_prop_vals may be returned (see get_rand_prop).
function get_rand_checksum_any
{
	get_rand_prop checksum_prop_vals $1 0
}
# Choose a random recordsize from recsize_prop_vals; offset 0 means
# any entry may be returned (see get_rand_prop).
function get_rand_recsize
{
	get_rand_prop recsize_prop_vals $1 0
}
# Choose a random large recordsize: offset 9 restricts the choice to
# the last three entries of recsize_prop_vals, i.e. sizes >= 256K
# (see get_rand_prop).
function get_rand_large_recsize
{
	get_rand_prop recsize_prop_vals $1 9
}
#
# Functions to toggle on/off properties
#
typeset -a binary_props=('atime' 'devices' 'exec' 'nbmand' 'readonly' 'setuid'
'xattr' 'zoned')
function toggle_prop
{
	typeset dataset=$1
	typeset property=$2
	typeset cur flipped

	datasetexists $dataset || log_fail "$dataset does not exist"

	# Flip the property: 'off' becomes 'on'; anything else becomes 'off'.
	cur=$(get_prop $property $dataset)
	if [[ $cur == 'off' ]]; then
		flipped='on'
	else
		flipped='off'
	fi
	log_must zfs set $property=$flipped $dataset
}
function toggle_binary_props
{
	typeset target=$1
	typeset -i idx=0

	# Flip every on/off property listed in binary_props.
	while (( idx < ${#binary_props[@]} )); do
		toggle_prop $target ${binary_props[$idx]}
		(( idx += 1 ))
	done
}
function randomize_ds_props
{
	typeset ds=$1
	typeset prop proplist val

	datasetexists $ds || log_fail "$ds does not exist"

	# Volumes do not take the filesystem-only binary properties, so
	# only flip readonly there; filesystems get every binary property
	# toggled.  Each dataset kind then gets its own multi-value list.
	if ds_is_volume $ds; then
		toggle_prop $ds readonly
		proplist="${vol_props[@]}"
	elif ds_is_filesystem $ds; then
		toggle_binary_props $ds
		proplist="${fs_props[@]}"
	else
		log_fail "$ds is neither a volume nor a file system"
	fi

	# Set each property to one randomly chosen value from its
	# matching "<prop>_prop_vals" array.
	for prop in $proplist; do
		typeset val=$(get_rand_prop "${prop}_prop_vals" 1 0)
		log_must zfs set $prop=$val $ds
	done
}

View File

@ -22,4 +22,19 @@ dist_pkgdata_SCRIPTS = \
rsend_020_pos.ksh \
rsend_021_pos.ksh \
rsend_022_pos.ksh \
rsend_024_pos.ksh
rsend_024_pos.ksh \
send-cD.ksh \
send-c_embedded_blocks.ksh \
send-c_incremental.ksh \
send-c_lz4_disabled.ksh \
send-c_mixed_compression.ksh \
send-c_props.ksh \
send-c_recv_dedup.ksh \
send-c_recv_lz4_disabled.ksh \
send-c_resume.ksh \
send-c_stream_size_estimate.ksh \
send-c_verify_contents.ksh \
send-c_verify_ratio.ksh \
send-c_volume.ksh \
send-c_zstreamdump.ksh \
send-cpL_varied_recsize.ksh

View File

@ -29,11 +29,11 @@
export BACKDIR=${TEST_BASE_DIR%%/}/backdir-rsend
export DISK1=${DISKS%% *}
export DISK1=$(echo $DISKS | awk '{print $1}')
export DISK2=$(echo $DISKS | awk '{print $2}')
export DISK3=$(echo $DISKS | awk '{print $3}')
export POOL=$TESTPOOL
export POOL2=$TESTPOOL1
export POOL3=$TESTPOOL2
export POOL2=$TESTPOOL2
export POOL3=$TESTPOOL3
export FS=$TESTFS

View File

@ -29,6 +29,7 @@
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/include/math.shlib
. $STF_SUITE/tests/functional/rsend/rsend.cfg
#
@ -207,11 +208,10 @@ function cmp_ds_prop
typeset dtst1=$1
typeset dtst2=$2
for item in "type" "origin" "volblocksize" "aclinherit" "acltype" \
for item in "type" "origin" "volblocksize" "acltype" "dnodesize" \
"atime" "canmount" "checksum" "compression" "copies" "devices" \
"dnodesize" "exec" "quota" "readonly" "recordsize" "reservation" \
"setuid" "snapdir" "version" "volsize" "xattr" "zoned" \
"mountpoint";
"exec" "quota" "readonly" "recordsize" "reservation" "setuid" \
"snapdir" "version" "volsize" "xattr" "zoned" "mountpoint";
do
zfs get -H -o property,value,source $item $dtst1 >> \
$BACKDIR/dtst1
@ -525,20 +525,20 @@ function test_fs_setup
mk_files 100 1048576 0 $sendfs &
mk_files 10 10485760 0 $sendfs &
mk_files 1 104857600 0 $sendfs &
wait
log_must wait
log_must zfs snapshot $sendfs@a
rm_files 200 256 0 $sendfs &
rm_files 200 131072 0 $sendfs &
rm_files 20 1048576 0 $sendfs &
rm_files 2 10485760 0 $sendfs &
wait
log_must wait
mk_files 400 256 0 $sendfs &
mk_files 400 131072 0 $sendfs &
mk_files 40 1048576 0 $sendfs &
mk_files 4 10485760 0 $sendfs &
wait
log_must wait
log_must zfs snapshot $sendfs@b
log_must eval "zfs send -v $sendfs@a >/$sendpool/initial.zsend"
@ -551,3 +551,121 @@ function test_fs_setup
fi
log_must zfs create -o compress=lz4 $sendpool/stream
}
#
# Check to see if the specified features are set in a send stream.
# The values for these features are found in include/sys/zfs_ioctl.h
#
# $1 The stream file
# $2-$n The flags expected in the stream
#
function stream_has_features
{
	typeset file=$1
	shift

	[[ -f $file ]] || log_fail "Couldn't find file: $file"

	# The feature flags are the third field of zstreamdump's
	# "features = <hex>" line; keep the last one seen.
	typeset flags=$(cat $file | zstreamdump | \
	    awk '/features =/ {features = $3} END {print features}')

	# Per-feature bit values (hex, no 0x prefix) — these mirror the
	# DMU_BACKUP_FEATURE_* flags in include/sys/zfs_ioctl.h.
	typeset -A feature
	feature[dedup]="1"
	feature[dedupprops]="2"
	feature[sa_spill]="4"
	feature[embed_data]="10000"
	feature[lz4]="20000"
	feature[mooch_byteswap]="40000"
	feature[large_blocks]="80000"
	feature[resuming]="100000"
	feature[redacted]="200000"
	feature[compressed]="400000"

	typeset flag known derived=0
	for flag in "$@"; do
		known=${feature[$flag]}
		[[ -z $known ]] && log_fail "Unknown feature: $flag"

		# Mask out just this feature's bit; it must match exactly
		# for the stream to "have" the feature.
		derived=$(printf "%x" $((0x${flags} & 0x${feature[$flag]})))
		[[ $derived = $known ]] || return 1
	done

	return 0
}
#
# Parse zstreamdump -v output. The output varies for each kind of record:
# BEGIN records are simply output as "BEGIN"
# END records are output as "END"
# OBJECT records become "OBJECT <object num>"
# FREEOBJECTS records become "FREEOBJECTS <startobj> <numobjs>"
# FREE records become "<record type> <start> <length>"
# WRITE records become:
# "<record type> <compression type> <start> <logical size> <compressed size>
# <data size>"
#
function parse_dump
{
	# WRITE records span two lines in the zstreamdump output; join
	# them first, drop any non-record lines, then print the fields
	# that identify each record type (formats documented above).
	sed '/^WRITE/{N;s/\n/ /;}' | grep "^[A-Z]" | awk '{
	    if ($1 == "BEGIN" || $1 == "END") print $1
	    if ($1 == "OBJECT") print $1" "$4
	    if ($1 == "FREEOBJECTS") print $1" "$4" "$7
	    if ($1 == "FREE") print $1" "$7" "$10
	    if ($1 == "WRITE") print $1" "$15" "$18" "$21" "$24" "$27}'
}
#
# Given a send stream, verify that the size of the stream matches what's
# expected based on the source or target dataset. If the stream is an
# incremental stream, subtract the size of the source snapshot before
# comparing. This function does not currently handle incremental streams
# that remove data.
#
# $1 The zstreamdump output file
# $2 The dataset to compare against
# This can be a source of a send or recv target (fs, not snapshot)
# $3 The percentage below which verification is deemed a failure
# $4 The source snapshot of an incremental send
#
function verify_stream_size
{
	typeset stream=$1
	typeset ds=$2
	typeset percent=${3:-90}	# minimum acceptable match percentage
	typeset inc_src=$4		# source snapshot of an incremental send

	[[ -f $stream ]] || log_fail "No such file: $stream"
	datasetexists $ds || log_fail "No such dataset: $ds"

	# zstreamdump reports the total payload bytes in the stream.
	typeset stream_size=$(cat $stream | zstreamdump | sed -n \
	    's/ Total write size = \(.*\) (0x.*)/\1/p')

	# For incrementals, subtract the source snapshot's size first.
	# Compressed streams carry physical (refer) bytes; uncompressed
	# streams carry logical (lrefer) bytes — compare like with like.
	typeset inc_size=0
	if [[ -n $inc_src ]]; then
		inc_size=$(get_prop lrefer $inc_src)
		if stream_has_features $stream compressed; then
			inc_size=$(get_prop refer $inc_src)
		fi
	fi

	if stream_has_features $stream compressed; then
		ds_size=$(get_prop refer $ds)
	else
		ds_size=$(get_prop lrefer $ds)
	fi
	ds_size=$((ds_size - inc_size))

	within_percent $stream_size $ds_size $percent || log_fail \
	    "$stream_size $ds_size differed by too much"
}
# Cleanup function for tests involving resumable send
function resume_cleanup
{
	typeset fs

	# Destroy the send and stream filesystems (when present), then
	# clear the receive pool and any leftover stream files.
	for fs in "$1" "$2"; do
		datasetexists $fs && log_must zfs destroy -r $fs
	done

	cleanup_pool $POOL2
	rm -f /$POOL/initial.zsend /$POOL/incremental.zsend
}

View File

@ -37,8 +37,9 @@ verify_runnable "both"
log_assert "Verify resumability of a full and incremental ZFS send/receive " \
"in the presence of a corrupted stream"
log_onexit cleanup_pools $POOL2 $POOL3
log_onexit resume_cleanup $sendfs $streamfs
sendfs=$POOL/sendfs
recvfs=$POOL3/recvfs
streamfs=$POOL2/stream

View File

@ -35,12 +35,13 @@ verify_runnable "both"
log_assert "Verify resumability of full ZFS send/receive with the -D " \
"(dedup) flag"
log_onexit cleanup_pool $POOL2
sendfs=$POOL/sendfs
recvfs=$POOL2/recvfs
streamfs=$POOL/stream
log_onexit resume_cleanup $sendfs $streamfs
test_fs_setup $sendfs $recvfs
resume_test "zfs send -D -v $sendfs@a" $streamfs $recvfs
file_check $sendfs $recvfs

View File

@ -37,12 +37,13 @@ verify_runnable "both"
log_assert "Verify resumability of a full and incremental ZFS send/receive " \
"with the -e (embedded) flag"
log_onexit cleanup_pool $POOL2
sendfs=$POOL/sendfs
recvfs=$POOL2/recvfs
streamfs=$POOL/stream
log_onexit resume_cleanup $sendfs $streamfs
test_fs_setup $sendfs $recvfs
resume_test "zfs send -v -e $sendfs@a" $streamfs $recvfs
resume_test "zfs send -v -e -i @a $sendfs@b" $streamfs $recvfs

View File

@ -45,12 +45,13 @@ fi
log_assert "Verify resumability of an incremental ZFS send/receive with ZFS " \
"bookmarks"
log_onexit cleanup_pool $POOL2
sendfs=$POOL/sendfs
recvfs=$POOL2/recvfs
streamfs=$POOL/stream
log_onexit resume_cleanup $sendfs $streamfs
test_fs_setup $sendfs $recvfs
log_must zfs bookmark $sendfs@a $sendfs#bm_a
log_must zfs destroy $sendfs@a

View File

@ -42,12 +42,13 @@ fi
log_assert "Verify resumability of a full ZFS send/receive with the source " \
"filesystem unmounted"
log_onexit cleanup_pool $POOL2
sendfs=$POOL/sendfs
recvfs=$POOL2/recvfs
streamfs=$POOL/stream
log_onexit resume_cleanup $sendfs $streamfs
test_fs_setup $sendfs $recvfs
log_must zfs unmount $sendfs
resume_test "zfs send $sendfs" $streamfs $recvfs

View File

@ -0,0 +1,77 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
. $STF_SUITE/tests/functional/rsend/rsend.kshlib
#
# Description:
# Verify that the -c and -D flags do not interfere with each other.
#
# Strategy:
# 1. Write unique data to a filesystem and create a compressed, deduplicated
# full stream.
# 2. Verify that the stream and send dataset show the same size
# 3. Make several copies of the original data, and create both full and
# incremental compressed, deduplicated send streams
# 4. Verify the full stream is no bigger than the stream from step 1
# 5. Verify the streams can be received correctly.
#
verify_runnable "both"
log_assert "Verify that the -c and -D flags do not interfere with each other"
log_onexit cleanup_pool $POOL2
typeset sendfs=$POOL2/sendfs
typeset recvfs=$POOL2/recvfs
typeset stream0=$BACKDIR/stream.0
typeset stream1=$BACKDIR/stream.1
typeset inc=$BACKDIR/stream.inc
log_must zfs create -o compress=lz4 $sendfs
log_must zfs create -o compress=lz4 $recvfs
typeset dir=$(get_prop mountpoint $sendfs)
# Don't use write_compressible: we want compressible but undedupable data here.
log_must file_write -o overwrite -f $dir/file -d R -b 4096 -c 1000
log_must zfs snapshot $sendfs@snap0
log_must eval "zfs send -D -c $sendfs@snap0 >$stream0"
# The stream size should match at this point because the data is all unique
verify_stream_size $stream0 $sendfs
for i in {0..3}; do
log_must cp $dir/file $dir/file.$i
done
log_must zfs snapshot $sendfs@snap1
# The stream sizes should match, since the second stream contains no new blocks
log_must eval "zfs send -D -c $sendfs@snap1 >$stream1"
typeset size0=$(stat -c %s $stream0)
typeset size1=$(stat -c %s $stream1)
within_percent $size0 $size1 90 || log_fail "$size0 and $size1"
# Finally, make sure the receive works correctly.
log_must eval "zfs send -D -c -i snap0 $sendfs@snap1 >$inc"
log_must eval "zfs recv -d $recvfs <$stream0"
log_must eval "zfs recv -d $recvfs <$inc"
cmp_ds_cont $sendfs $recvfs
# The size of the incremental should be the same as the initial send.
typeset size2=$(stat -c %s $inc)
# Report the sizes actually compared: size0 vs size2 (the original message
# incorrectly printed $size1 here).
within_percent $size0 $size2 90 || log_fail "$size0 and $size2"
log_pass "The -c and -D flags do not interfere with each other"

View File

@ -0,0 +1,109 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
. $STF_SUITE/tests/functional/rsend/rsend.kshlib
. $STF_SUITE/include/properties.shlib
#
# Description:
# Verify that compressed streams can contain embedded blocks.
#
# Strategy:
# 1. Create a filesystem with compressible data and embedded blocks.
# 2. Verify the created streams can be received correctly.
# 3. Verify the presence / absence of embedded blocks in the compressed stream,
# as well as the receiving file system.
#
verify_runnable "both"
log_assert "Verify that compressed streams can contain embedded blocks."
log_onexit cleanup_pool $POOL2
# Only recsize is used below (the original declared unused "objs" and "obj").
typeset recsize
typeset sendfs=$POOL2/sendfs
typeset recvfs=$POOL2/recvfs
typeset stream=$BACKDIR/stream
typeset dump=$BACKDIR/dump
typeset recvfs2=$POOL2/recvfs2
typeset stream2=$BACKDIR/stream2
typeset dump2=$BACKDIR/dump2
log_must zfs create -o compress=lz4 $sendfs
log_must zfs create -o compress=lz4 $recvfs
log_must zfs create -o compress=lz4 $recvfs2
typeset dir=$(get_prop mountpoint $sendfs)
# Populate the send dataset with compressible data and embedded block files.
# Each embedded-block candidate is a mostly-hole file of size $recsize with
# 8 random bytes at the end.
write_compressible $dir 16m
for recsize in "${recsize_prop_vals[@]}"; do
# For lz4, this method works for blocks up to 16k, but not larger
[[ $recsize -eq $((32 * 1024)) ]] && break
if is_linux; then
log_must truncate -s $recsize $dir/$recsize
log_must dd if=/dev/urandom of=$dir/$recsize \
seek=$((recsize - 8)) bs=1 count=8 conv=notrunc
else
log_must mkholes -h 0:$((recsize - 8)) -d $((recsize - 8)):8 \
$dir/$recsize
fi
done
# Generate the streams and zstreamdump output.
log_must zfs snapshot $sendfs@now
log_must eval "zfs send -c $sendfs@now >$stream"
log_must eval "zstreamdump -v <$stream >$dump"
log_must eval "zfs recv -d $recvfs <$stream"
cmp_ds_cont $sendfs $recvfs
verify_stream_size $stream $sendfs
log_mustnot stream_has_features $stream embed_data
log_must eval "zfs send -c -e $sendfs@now >$stream2"
log_must eval "zstreamdump -v <$stream2 >$dump2"
log_must eval "zfs recv -d $recvfs2 <$stream2"
cmp_ds_cont $sendfs $recvfs2
verify_stream_size $stream2 $sendfs
log_must stream_has_features $stream2 embed_data
# Verify embedded blocks are present only when expected.
for recsize in "${recsize_prop_vals[@]}"; do
[[ $recsize -eq $((32 * 1024)) ]] && break
typeset send_obj=$(get_objnum $(get_prop mountpoint $sendfs)/$recsize)
typeset recv_obj=$(get_objnum \
$(get_prop mountpoint $recvfs/sendfs)/$recsize)
typeset recv2_obj=$(get_objnum \
$(get_prop mountpoint $recvfs2/sendfs)/$recsize)
log_must eval "zdb -ddddd $sendfs $send_obj >$BACKDIR/sendfs.zdb"
log_must eval "zdb -ddddd $recvfs/sendfs $recv_obj >$BACKDIR/recvfs.zdb"
log_must eval "zdb -ddddd $recvfs2/sendfs $recv2_obj >$BACKDIR/recvfs2.zdb"
grep -q "EMBEDDED" $BACKDIR/sendfs.zdb || \
log_fail "Obj $send_obj not embedded in $sendfs"
grep -q "EMBEDDED" $BACKDIR/recvfs.zdb || \
log_fail "Obj $recv_obj not embedded in $recvfs"
grep -q "EMBEDDED" $BACKDIR/recvfs2.zdb || \
log_fail "Obj $recv2_obj not embedded in $recvfs2"
# The plain -c stream must not carry WRITE_EMBEDDED records; the -c -e
# stream must.  Report $send_obj in the failure messages (the original
# referenced $obj, which was never assigned).
grep -q "WRITE_EMBEDDED object = $send_obj offset = 0" $dump && \
log_fail "Obj $send_obj embedded in zstreamdump output"
grep -q "WRITE_EMBEDDED object = $send_obj offset = 0" $dump2 || \
log_fail "Obj $send_obj not embedded in zstreamdump output"
done
log_pass "Compressed streams can contain embedded blocks."

View File

@ -0,0 +1,100 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
. $STF_SUITE/tests/functional/rsend/rsend.kshlib
#
# Description:
# Verify that compressed send works correctly with incremental sends.
#
# Strategy:
# 1. Randomly choose either a -i or -I incremental.
# 2. Generate compressed incremental replication streams for a pool, a
# descendant dataset, and a volume.
# 3. Receive these streams verifying both the contents, and intermediate
# snapshots are present or absent as appropriate to the -i or -I option.
#
verify_runnable "both"
log_assert "Verify compressed send works with incremental send streams."
log_onexit cleanup_pool $POOL2
typeset opt=$(random_get "-i" "-I")
typeset final dstlist list vol
# Seed $POOL2 with a full replica of $POOL so incrementals from @init apply.
log_must eval "zfs send -R $POOL@final > $BACKDIR/final"
log_must eval "zfs receive -d -F $POOL2 < $BACKDIR/final"
# Compare contents of $POOL and $POOL2; -I must have replicated the
# intermediate snapshots, -i must not have.  With $1 == "clean", also
# reset $POOL2 back to a full replica for the next round.
function do_checks
{
log_must cmp_ds_cont $POOL $POOL2
[[ $opt = "-I" ]] && log_must cmp_ds_subs $POOL $POOL2
[[ $opt = "-i" ]] && log_mustnot cmp_ds_subs $POOL $POOL2
[[ $1 != "clean" ]] && return
cleanup_pool $POOL2
log_must eval "zfs send -R $POOL@final > $BACKDIR/final"
log_must eval "zfs receive -d -F $POOL2 < $BACKDIR/final"
}
if is_global_zone; then
# Send from the pool root
final=$(getds_with_suffix $POOL2 @final)
list="$final $(getds_with_suffix $POOL2 @snapA)"
list="$list $(getds_with_suffix $POOL2 @snapB)"
list="$list $(getds_with_suffix $POOL2 @snapC)"
log_must eval "zfs send -c -R $opt @init $POOL2@final >$BACKDIR/pool"
log_must destroy_tree $list
log_must eval "zfs recv -d -F $POOL2 <$BACKDIR/pool"
# The receive must have recreated exactly the @final snapshots we saved.
dstlist=$(getds_with_suffix $POOL2 @final)
[[ $final != $dstlist ]] && log_fail "$final != $dstlist"
do_checks clean
# Send of a volume
vol=$POOL2/$FS/vol
final=$(getds_with_suffix $vol @final)
log_must eval "zfs send -c -R $opt @init $vol@final >$BACKDIR/vol"
log_must destroy_tree $vol@snapB $vol@snapC $vol@final
log_must eval "zfs recv -d -F $POOL2 <$BACKDIR/vol"
dstlist=$(getds_with_suffix $POOL2/$FS/vol @final)
[[ $final != $dstlist ]] && log_fail "$final != $dstlist"
do_checks clean
fi
# Send of a descendant fs
final=$(getds_with_suffix $POOL2/$FS @final)
list="$final $(getds_with_suffix $POOL2/$FS @snapA)"
list="$list $(getds_with_suffix $POOL2/$FS @snapB)"
list="$list $(getds_with_suffix $POOL2/$FS @snapC)"
log_must eval "zfs send -c -R $opt @init $POOL2/$FS@final >$BACKDIR/fs"
log_must destroy_tree $list
log_must eval "zfs recv -d -F $POOL2 <$BACKDIR/fs"
dstlist=$(getds_with_suffix $POOL2/$FS @final)
[[ $final != $dstlist ]] && log_fail "$final != $dstlist"
do_checks
log_pass "Compressed send works with incremental send streams."

View File

@ -0,0 +1,73 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
. $STF_SUITE/tests/functional/rsend/rsend.kshlib
#
# Description:
# Verify a pool without the lz4 feature enabled can create compressed send
# streams, and that they can be received into pools with or without the
# lz4 feature.
#
# Strategy:
# 1. For each of an uncompressed, and gzip dataset created from a pool with
# the lz4 feature disabled, receive the stream into a pool with and without
# the feature enabled.
#
verify_runnable "both"
# Note: unlike recv_lz4_disabled, every receive here is expected to succeed,
# so the assertion message reflects that (the original message was copied
# from the rejection test).
log_assert "Verify compressed streams from pools without lz4 can be received."
typeset send_ds=$POOL2/testds
typeset recv_ds=$POOL3/testds
# Destroy both pools and restore $POOL2 to its default state on exit.
function cleanup
{
poolexists $POOL2 && destroy_pool $POOL2
poolexists $POOL3 && destroy_pool $POOL3
log_must zpool create $POOL2 $DISK2
}
log_onexit cleanup
# Recreate the send pool with all features disabled (-d) so it lacks lz4.
# Use poolexists for the pool-level check, consistent with cleanup above.
poolexists $POOL2 && log_must zpool destroy $POOL2
log_must zpool create -d $POOL2 $DISK2
for compress in off gzip; do
for pool_opt in '' -d; do
poolexists $POOL3 && destroy_pool $POOL3
log_must zpool create $pool_opt $POOL3 $DISK3
datasetexists $send_ds && log_must zfs destroy -r $send_ds
datasetexists $recv_ds && log_must zfs destroy -r $recv_ds
log_must zfs create -o compress=$compress $send_ds
typeset dir=$(get_prop mountpoint $send_ds)
write_compressible $dir 16m
log_must zfs snapshot $send_ds@full
# Both the compressed and uncompressed streams must be receivable.
log_must eval "zfs send -c $send_ds@full >$BACKDIR/full-c"
log_must eval "zfs recv $recv_ds <$BACKDIR/full-c"
log_must zfs destroy -r $recv_ds
log_must eval "zfs send $send_ds@full >$BACKDIR/full"
log_must eval "zfs recv $recv_ds <$BACKDIR/full"
done
done
log_pass "Compressed streams from pools without lz4 can be received."

View File

@ -0,0 +1,54 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
. $STF_SUITE/tests/functional/rsend/rsend.kshlib
. $STF_SUITE/include/properties.shlib
#
# Description:
# Verify datasets using mixed compression algorithms can be received.
#
# Strategy:
# 1. Write data with each of the available compression algorithms
# 2. Receive a full compressed send, and verify the data and compression ratios
#
verify_runnable "both"
log_assert "Verify datasets using mixed compression algorithms can be received."
log_onexit cleanup_pool $POOL2
send_ds=$POOL2/sendfs
recv_ds=$POOL2/recvfs
log_must zfs create $send_ds
# Cycle through every supported compression property value, writing a
# batch of compressible data under each one.
typeset alg
for alg in "${compress_prop_vals[@]}"; do
log_must zfs set compress=$alg $send_ds
write_compressible $(get_prop mountpoint $send_ds) 16m
done
log_must zfs set compress=off $send_ds
log_must zfs snapshot $send_ds@full
# A single compressed full stream must carry all of the mixed blocks.
log_must eval "zfs send -c $send_ds@full >$BACKDIR/full"
log_must eval "zfs recv $recv_ds <$BACKDIR/full"
# Both ends of the transfer should account for the same physical size.
typeset ds
for ds in $send_ds $recv_ds; do
verify_stream_size $BACKDIR/full $ds
done
log_must cmp_ds_cont $send_ds $recv_ds
log_pass "Datasets using mixed compression algorithms can be received."

View File

@ -0,0 +1,67 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
. $STF_SUITE/tests/functional/rsend/rsend.kshlib
. $STF_SUITE/include/properties.shlib
#
# Description:
# Verify compressed send streams can still preserve properties
#
# Strategy:
# 1. Randomly modify the properties in the src pool
# 2. Send a full compressed stream with -p to preserve properties
# 3. Verify all the received properties match the source datasets
# 4. Repeat the process with -R instead of -p
#
verify_runnable "global"
# Recreate both pools and the standard test model from scratch.
function cleanup
{
destroy_pool $POOL
destroy_pool $POOL2
log_must zpool create $POOL $DISK1
log_must zpool create $POOL2 $DISK2
log_must setup_test_model $POOL
}
log_assert "Compressed send doesn't interfere with preservation of properties"
log_onexit cleanup
# The empty-string entry stands for the pool root dataset itself.
typeset -a datasets=("" "/pclone" "/$FS" "/$FS/fs1" "/$FS/fs1/fs2"
"/$FS/fs1/fclone" "/vol" "/$FS/vol")
typeset ds
for opt in "-p" "-R"; do
# Quote the expansion so the "" (pool root) element isn't dropped
# by word splitting; unquoted, the root dataset was never touched.
for ds in "${datasets[@]}"; do
randomize_ds_props $POOL$ds
done
log_must eval "zfs send -c $opt $POOL@final > $BACKDIR/pool-final$opt"
log_must eval "zfs receive -d -F $POOL2 < $BACKDIR/pool-final$opt"
for ds in "${datasets[@]}"; do
log_must cmp_ds_prop $POOL$ds $POOL2$ds
log_must cmp_ds_prop $POOL$ds@final $POOL2$ds@final
done
# Don't cleanup the second time, since we do that on exit anyway.
[[ $opt = "-p" ]] && cleanup
done
log_pass "Compressed send doesn't interfere with preservation of properties"

View File

@ -0,0 +1,55 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2016 by Delphix. All rights reserved.
#
. $STF_SUITE/tests/functional/rsend/rsend.kshlib
#
# Description:
# Verify that we can receive a compressed stream into a deduped filesystem.
#
# Strategy:
# 1. Write heavily duplicated data to a filesystem and create a compressed
# full stream.
# 2. Verify that the stream can be received correctly into a dedup=verify
# filesystem.
#
verify_runnable "both"
# This must be log_assert, not log_pass: log_pass exits immediately, so the
# original (log_pass here) ended the test before the body ever ran.
log_assert "Verify a compressed stream can be received into a deduped filesystem"
log_onexit cleanup_pool $POOL2
typeset sendfs=$POOL2/sendfs
typeset recvfs=$POOL2/recvfs
typeset stream0=$BACKDIR/stream.0
typeset stream1=$BACKDIR/stream.1
typeset inc=$BACKDIR/stream.inc
log_must zfs create -o compress=lz4 $sendfs
log_must zfs create -o compress=lz4 -o dedup=verify $recvfs
typeset dir=$(get_prop mountpoint $sendfs)
# Write repeated copies of random data so the receive side has dup blocks.
for i in {0..10}; do
log_must file_write -o overwrite -f $dir/file.$i -d R -b 4096 -c 1000
done
log_must zfs snapshot $sendfs@snap0
log_must eval "zfs send -c $sendfs@snap0 >$stream0"
# Finally, make sure the receive works correctly.
log_must eval "zfs recv -d $recvfs <$stream0"
cmp_ds_cont $sendfs $recvfs
log_pass "The compressed stream could be received into a deduped filesystem"

View File

@ -0,0 +1,68 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
. $STF_SUITE/tests/functional/rsend/rsend.kshlib
#
# Description:
# Verify a pool without the lz4 feature gracefully rejects a compressed stream
# because on any sending pool that supports it, metadata will be compressed
# with lz4 even if user data is not compressed.
#
# Strategy:
# 1. For each of an uncompressed, gzip and lz4 dataset, do the following
# receives into a pool without the lz4 feature:
# 2. Attempt to receive the compressed stream (should fail)
# 3. Attempt to receive the uncompressed stream (should succeed)
#
verify_runnable "both"
log_assert "Verify compressed streams are rejected if incompatible."
typeset compress_types="off gzip lz4"
typeset send_ds=$POOL2/testds
typeset recv_ds=$POOL3/testds
# Destroy both pools and restore $POOL2 to its default state on exit.
function cleanup
{
poolexists $POOL2 && destroy_pool $POOL2
poolexists $POOL3 && destroy_pool $POOL3
log_must zpool create $POOL2 $DISK2
}
log_onexit cleanup
# Recreate the receive pool with all features disabled (-d) so it lacks lz4.
# Use poolexists for the pool-level check, consistent with cleanup above
# (the original used datasetexists here).
poolexists $POOL3 && log_must zpool destroy $POOL3
log_must zpool create -d $POOL3 $DISK3
for compress in $compress_types; do
datasetexists $send_ds && log_must zfs destroy -r $send_ds
datasetexists $recv_ds && log_must zfs destroy -r $recv_ds
log_must zfs create -o compress=$compress $send_ds
typeset dir=$(get_prop mountpoint $send_ds)
write_compressible $dir 16m
log_must zfs snapshot $send_ds@full
# The compressed stream must be rejected; the plain one must be accepted.
log_must eval "zfs send -c $send_ds@full >$BACKDIR/full-c"
log_mustnot eval "zfs recv $recv_ds <$BACKDIR/full-c"
log_must eval "zfs send $send_ds@full >$BACKDIR/full"
log_must eval "zfs recv $recv_ds <$BACKDIR/full"
done
log_pass "Compressed streams are rejected if incompatible."

View File

@ -0,0 +1,49 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
. $STF_SUITE/tests/functional/rsend/rsend.kshlib
#
# Description:
# Verify resumability of full and incremental ZFS send/receive with the -c
# (compress) flag in the presence of a corrupted stream.
#
# Strategy:
# 1. Start a full ZFS send with the -c flag (compress), redirect output to
# a file
# 2. Mess up the contents of the stream state file on disk
# 3. Try ZFS receive, which should fail with a checksum mismatch error
# 4. ZFS send to the stream state file again using the receive_resume_token
# 5. ZFS receieve and verify the receive completes successfully
# 6. Repeat steps on an incremental ZFS send
#
verify_runnable "both"
sendfs=$POOL/sendfs
recvfs=$POOL2/recvfs
streamfs=$POOL/stream
log_assert "Verify compressed send streams can be resumed if interrupted"
log_onexit resume_cleanup $sendfs $streamfs
test_fs_setup $sendfs $recvfs
resume_test "zfs send -c -v $sendfs@a" $streamfs $recvfs
resume_test "zfs send -c -v -i @a $sendfs@b" $streamfs $recvfs
file_check $sendfs $recvfs
log_pass "Compressed send streams can be resumed if interrupted"

View File

@ -0,0 +1,91 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
. $STF_SUITE/tests/functional/rsend/rsend.kshlib
#
# Description:
# Verify the stream size estimate given by -P accounts for compressed send.
#
# Strategy:
# 1. For datasets of varied compression types do the following:
# 2. Write data, verify stream size estimates with and without -c
#
verify_runnable "both"
typeset compress_types="off gzip lz4"
typeset send_ds="$POOL2/testfs"
typeset send_vol="$POOL2/vol"
typeset send_voldev="$ZVOL_DEVDIR/$POOL2/vol"
typeset file="$BACKDIR/file.0"
typeset megs="16"
typeset compress
# Run a "zfs send -nP ..." command ($1) and extract the estimated stream
# size (third column of the line naming the snapshot) from its output.
function get_estimated_size
{
typeset cmd=$1
typeset ds=${cmd##* }
typeset tmpfile=$(mktemp -p $BACKDIR)
eval "$cmd >$tmpfile"
[[ $? -eq 0 ]] || log_fail "get_estimated_size: $cmd"
typeset size=$(eval "awk '\$2 == \"$ds\" {print \$3}' $tmpfile")
rm -f $tmpfile
echo $size
}
log_assert "Verify the stream size given by -P accounts for compressed send."
log_onexit cleanup_pool $POOL2
write_compressible $BACKDIR ${megs}m
for compress in $compress_types; do
datasetexists $send_ds && log_must zfs destroy -r $send_ds
datasetexists $send_vol && log_must zfs destroy -r $send_vol
log_must zfs create -o compress=$compress $send_ds
log_must zfs create -V 1g -o compress=$compress $send_vol
typeset dir=$(get_prop mountpoint $send_ds)
log_must cp $file $dir
log_must zfs snapshot $send_ds@snap
log_must dd if=$file of=$send_voldev
log_must zfs snapshot $send_vol@snap
# Uncompressed estimates should track the logical size (lrefer) ...
typeset ds_size=$(get_estimated_size "zfs send -nP $send_ds@snap")
typeset ds_lrefer=$(get_prop lrefer $send_ds)
within_percent $ds_size $ds_lrefer 90 || log_fail \
"$ds_size and $ds_lrefer differed by too much"
typeset vol_size=$(get_estimated_size "zfs send -nP $send_vol@snap")
typeset vol_lrefer=$(get_prop lrefer $send_vol)
within_percent $vol_size $vol_lrefer 90 || log_fail \
"$vol_size and $vol_lrefer differed by too much"
# ... while -c estimates should track the physical size (refer).
typeset ds_csize=$(get_estimated_size "zfs send -nP -c $send_ds@snap")
typeset ds_refer=$(get_prop refer $send_ds)
within_percent $ds_csize $ds_refer 90 || log_fail \
"$ds_csize and $ds_refer differed by too much"
typeset vol_csize=$(get_estimated_size "zfs send -nP -c $send_vol@snap")
typeset vol_refer=$(get_prop refer $send_vol)
within_percent $vol_csize $vol_refer 90 || log_fail \
"$vol_csize and $vol_refer differed by too much"
done
log_pass "The stream size given by -P accounts for compressed send."

View File

@ -0,0 +1,55 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
. $STF_SUITE/tests/functional/rsend/rsend.kshlib
#
# Description:
# Verify compressed send streams replicate data and datasets
#
# Strategy:
# 1. Back up all the data from POOL/FS
# 2. Verify all the datasets and data can be recovered in POOL2
# 3. Back up all the data from root filesystem POOL2
# 4. Verify all the data can be recovered, too
#
verify_runnable "both"
log_assert "zfs send -c -R send replication stream up to the named snap."
log_onexit cleanup_pool $POOL2
typeset dst
# First pass: replicate the whole pool (all descendants) and confirm both
# the dataset hierarchy and the file contents survive the round trip.
log_must eval "zfs send -c -R $POOL@final > $BACKDIR/pool-final-R"
log_must eval "zfs receive -d -F $POOL2 < $BACKDIR/pool-final-R"
dst=$(get_dst_ds $POOL $POOL2)
log_must cmp_ds_subs $POOL $dst
log_must cmp_ds_cont $POOL $dst
# Reset the destination pool before the second pass.
log_must cleanup_pool $POOL2
# Second pass: replicate just $POOL/$FS and its descendants.
log_must eval "zfs send -c -R $POOL/$FS@final > $BACKDIR/fs-final-R"
log_must eval "zfs receive -d $POOL2 < $BACKDIR/fs-final-R"
dst=$(get_dst_ds $POOL/$FS $POOL2)
log_must cmp_ds_subs $POOL/$FS $dst
log_must cmp_ds_cont $POOL/$FS $dst
log_pass "zfs send -c -R send replication stream up to the named snap."

View File

@ -0,0 +1,66 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
. $STF_SUITE/tests/functional/rsend/rsend.kshlib
. $STF_SUITE/include/properties.shlib
#
# Description:
# Verify that the amount of data in a send -c stream matches compressratio.
#
# Strategy:
# 1. For random compression types, and compressible / incompressible data:
# 2. Create a snap with data
# 3. Compare the size of the stream with the data on the dataset, adjusted
# by compressratio for normal send, and compared to used for send -c.
#
verify_runnable "both"
log_assert "Verify send -c streams are compressed"
log_onexit cleanup_pool $POOL2
typeset sendfs=$POOL2/$FS
typeset megs=128
for prop in $(get_rand_compress_any 6); do
for compressible in 'yes' 'no'; do
log_must zfs create -o compress=$prop $sendfs
if [[ $compressible = 'yes' ]]; then
write_compressible $(get_prop mountpoint $sendfs) \
${megs}m
else
typeset file="$(get_prop mountpoint $sendfs)/ddfile"
log_must dd if=/dev/urandom of=$file bs=1024k count=$megs
fi
log_must zfs snapshot $sendfs@snap
# Calculate the sizes and verify the compression ratio.
log_must eval "zfs send $sendfs@snap >$BACKDIR/uncompressed"
verify_stream_size $BACKDIR/uncompressed $sendfs
log_must eval "zfs send -c $sendfs@snap >$BACKDIR/compressed"
verify_stream_size $BACKDIR/compressed $sendfs
log_must rm $BACKDIR/uncompressed $BACKDIR/compressed
log_must zfs destroy -r $sendfs
done
done
log_pass "Verify send -c streams are compressed"

View File

@ -0,0 +1,80 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
. $STF_SUITE/tests/functional/rsend/rsend.kshlib
#
# Description:
# Verify that compressed send correctly handles volumes
#
# Strategy:
# 1. Write compressible data into a volume, take a snap
# 2. Verify the compressed stream is the correct size, and has the correct data
# 3. Repeat step 2 for an incremental compressed stream
#
# Destroy the test volume and reset the destination pool.
function cleanup
{
log_must zfs destroy -r $vol
cleanup_pool $POOL2
}
verify_runnable "both"
log_assert "Verify compressed send works with volumes"
log_onexit cleanup
typeset vol="$POOL/newvol"
typeset vol2="$POOL2/newvol"
typeset voldev="$ZVOL_DEVDIR/$POOL/newvol"
typeset voldev2="$ZVOL_DEVDIR/$POOL2/newvol"
typeset data1=$BACKDIR/file.0
typeset data2=$BACKDIR/file.1
typeset megs=8
log_must zfs create -V 256m -o compress=lz4 $vol
# write_compressible leaves two ${megs}m files ($data1, $data2) in $BACKDIR.
write_compressible $BACKDIR ${megs}m 2
md5_1=$(md5sum $data1 | awk '{print $1}')
md5_2=$(md5sum $data2 | awk '{print $1}')
log_must dd if=$data1 of=$voldev bs=1024k
log_must zfs snapshot $vol@snap
log_must eval "zfs send -c $vol@snap >$BACKDIR/full"
log_must eval "zfs recv -d $POOL2 <$BACKDIR/full"
verify_stream_size $BACKDIR/full $vol
verify_stream_size $BACKDIR/full $vol2
# The received volume must reproduce the original data byte for byte.
md5=$(dd if=$voldev2 bs=1024k count=$megs 2>/dev/null | md5sum | \
awk '{print $1}')
[[ $md5 = $md5_1 ]] || log_fail "md5 mismatch: $md5 != $md5_1"
# Repeat, for an incremental send
log_must dd seek=$megs if=$data2 of=$voldev bs=1024k
log_must zfs snapshot $vol@snap2
log_must eval "zfs send -c -i snap $vol@snap2 >$BACKDIR/inc"
log_must eval "zfs recv -d $POOL2 <$BACKDIR/inc"
verify_stream_size $BACKDIR/inc $vol 90 $vol@snap
verify_stream_size $BACKDIR/inc $vol2 90 $vol2@snap
md5=$(dd skip=$megs if=$voldev2 bs=1024k count=$megs 2>/dev/null | md5sum | \
awk '{print $1}')
[[ $md5 = $md5_2 ]] || log_fail "md5 mismatch: $md5 != $md5_2"
log_pass "Verify compressed send works with volumes"

View File

@ -0,0 +1,59 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
. $STF_SUITE/tests/functional/rsend/rsend.kshlib
. $STF_SUITE/include/math.shlib
#
# Description:
# Verify compression features show up in zstreamdump
#
# Strategy:
# 1. Create a full compressed send stream
# 2. Verify zstreamdump shows this stream has the relevant features
# 3. Verify zstreamdump's accounting of logical and compressed size is correct
#
verify_runnable "both"
log_assert "Verify zstreamdump correctly interprets compressed send streams."
log_onexit cleanup_pool $POOL2
typeset sendfs=$POOL2/fs
log_must zfs create -o compress=lz4 $sendfs
typeset dir=$(get_prop mountpoint $sendfs)
write_compressible $dir 16m
log_must zfs snapshot $sendfs@full
# Build a compressed full stream and check the advertised feature flags.
log_must eval "zfs send -c $sendfs@full >$BACKDIR/full"
log_must stream_has_features $BACKDIR/full lz4 compressed
# Normalize the verbose dump so per-record fields are easy to sum.
zstreamdump -v <$BACKDIR/full | parse_dump > $BACKDIR/dump.out
# Sum the logical (field 4) and compressed (field 5) sizes of all WRITE
# records with a non-"off" compression type, then check each against the
# corresponding dataset accounting property.
lsize=$(awk '/^WRITE [^0]/ {lsize += $4} END {printf("%d", lsize)}' \
$BACKDIR/dump.out)
lsize_prop=$(get_prop logicalused $sendfs)
within_percent $lsize $lsize_prop 90 || log_fail \
"$lsize and $lsize_prop differed by too much"
csize=$(awk '/^WRITE [^0]/ {csize += $5} END {printf("%d", csize)}' \
$BACKDIR/dump.out)
csize_prop=$(get_prop used $sendfs)
within_percent $csize $csize_prop 90 || log_fail \
"$csize and $csize_prop differed by too much"
log_pass "zstreamdump correctly interprets compressed send streams."

View File

@ -0,0 +1,199 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
#
. $STF_SUITE/tests/functional/rsend/rsend.kshlib
#
# Description:
# Verify compressed send works correctly with datasets of varying recsize.
#
# Strategy:
# 1. Check the recv behavior (into pools with features enabled and disabled)
# of all combinations of -c -p and -L. Verify the stream is compressed,
# and that the recsize property and that of a received file is correct
# according to this matrix:
#
# +---------+--------+------------+------------+-----------+-----------+
# | send | send | received | received | received | received |
# | stream | stream | file bs | prop | file bs | props |
# | recsize | flags | (disabled) | (disabled) | (enabled) | (enabled) |
# +---------+--------+------------+------------+-----------+-----------+
# | 128k | | 128k | 128k | 128k | 128k |
# | 128k | -c | Fails | Fails | 128k | 128k |
# | 128k | -p | 128k | 128k | 128k | 128k |
# | 128k | -L | 128k | 128k | 128k | 128k |
# | 128k | -cp | Fails | Fails | 128k | 128k |
# | 128k | -cL | Fails | Fails | 128k | 128k |
# | 128k | -pL | 128k | 128k | 128k | 128k |
# | 128k | -cpL | Fails | Fails | 128k | 128k |
# | 1m | | Fails | Fails | 128k | 128k |
# | 1m | -c | Fails | Fails | 128k | 128k |
# | 1m | -p | 128k | 128k | 128k | 1m |
# | 1m | -L | Fails | Fails | 1m | 128k |
# | 1m | -cp | Fails | Fails | 128k | 1m |
# | 1m | -cL | Fails | Fails | 1m | 128k |
# | 1m | -pL | Fails | Fails | 1m | 1m |
# | 1m | -cpL | Fails | Fails | 1m | 1m |
# +---------+--------+------------+------------+-----------+-----------+
#
verify_runnable "both"
# Tear down everything this test created: the per-recsize send datasets,
# the receive pool contents, and the features-disabled pool.
function cleanup
{
	for ds in $TESTPOOL/128k $TESTPOOL/1m; do
		datasetexists $ds && log_must zfs destroy $ds
	done
	cleanup_pool $POOL2
	destroy_pool $POOL3
}
# For a received stream, verify the recsize (prop and file) match expectations.
#
# $1 received dataset
# $2 expected file blocksize (as reported by stat)
# $3 expected recordsize property
function check_recsize
{
	typeset ds=$1
	typeset want_bs=$2
	typeset want_rs=$3

	typeset f="$(get_prop mountpoint $ds)/testfile"
	if [[ ! -f $f ]]; then
		log_fail "file '$f' doesn't exist"
	fi

	typeset got_rs=$(get_prop recsize $ds)
	# Pull the "IO Block:" figure out of stat's output.
	typeset got_bs=$(stat $f | sed -n \
	    's/.*IO Block: \([0-9]*\).*/\1/p')

	if [[ $got_rs != $want_rs ]]; then
		log_fail \
		    "read_recsize: $got_rs expected_recsize: $want_rs"
	fi
	if [[ $got_bs != $want_bs ]]; then
		log_fail \
		    "read_file_bs: $got_bs expected_file_bs: $want_bs"
	fi
}
#
# This function does a zfs send and receive according to the parameters
# below, and verifies the data shown in the strategy section.
#
# -[cpL] flags to pass through to 'zfs send'
# -d Receive into a pool with all features disabled
#
# $1 The recordsize of the send dataset
# $2 Whether or not the recv should work.
# $3 The blocksize expected in a received file (default 128k)
# $4 The recordsize property expected in a received dataset (default 128k)
#
# Perform one send/recv round trip per the matrix in the strategy comment
# above.  Option flags -c, -p and -L are collected and passed through to
# 'zfs send'; -d is consumed here and selects the features-disabled pool
# ($POOL3) as the receive target.  Positional args follow the comment
# block above this function.
function check
{
	typeset recv_pool=$POOL2
	typeset flags='-'
	# Accumulate the requested send options into one flag word,
	# e.g. "-cpL".
	while getopts "cdpL" opt; do
		case $opt in
		c)
			flags+='c'
			;;
		d)
			recv_pool=$POOL3
			;;
		p)
			flags+='p'
			;;
		L)
			flags+='L'
			;;
		esac
	done
	shift $(($OPTIND - 1))
	# If only the leading '-' remains, no send flags were requested.
	[[ ${#flags} -eq 1 ]] && flags=''
	typeset recsize=$1
	typeset verify=$2
	# Both expectations default to 128k (131072 bytes).
	typeset expected_file_bs=${3-131072}
	typeset expected_recsize=${4-131072}
	typeset send_ds=$TESTPOOL/$recsize
	typeset send_snap=$send_ds@snap
	typeset recv_ds=$recv_pool/$recsize
	typeset stream=$BACKDIR/stream.out
	datasetexists $send_ds || log_fail "send ds: $send_ds doesn't exist"
	[[ -f $stream ]] && log_must rm $stream
	log_must eval "zfs send $flags $send_snap >$stream"
	# $verify is log_must or log_mustnot, per the expected outcome.
	$verify eval "zfs recv $recv_ds <$stream"
	# NOTE(review): stream_size appears unused below — verify_stream_size
	# presumably recomputes it internally; confirm before removing.
	typeset stream_size=$(cat $stream | zstreamdump | sed -n \
	    's/ Total write size = \(.*\) (0x.*)/\1/p')
	#
	# Special case: For a send dataset with large blocks, don't try to
	# verify the stream size is correct if the compress flag is present
	# but the large blocks flag isn't. In these cases, the user data
	# isn't compressed in the stream (though metadata is) so the
	# verification would fail.
	#
	typeset do_size_test=true
	[[ $recsize = $large && $flags =~ 'c' && ! $flags =~ 'L' ]] && \
	    do_size_test=false
	$do_size_test && verify_stream_size $stream $send_ds
	if [[ $verify = "log_mustnot" ]]; then
		# The recv was expected to fail; it must leave no dataset.
		datasetnonexists $recv_ds || log_fail "$recv_ds shouldn't exist"
		return
	fi
	check_recsize $recv_ds $expected_file_bs $expected_recsize
	$do_size_test && verify_stream_size $stream $recv_ds
	# Destroy so the next combination can receive into the same name.
	log_must zfs destroy -r $recv_ds
}
log_assert "Verify compressed send works with datasets of varying recsize."
log_onexit cleanup
typeset recsize opts dir
# 128k is the default recordsize; 1m requires the large_blocks feature.
typeset small=$((128 * 1024))
typeset large=$((1024 * 1024))
# Create POOL3 with features disabled and datasets to create test send streams
datasetexists $POOL3 && log_must zpool destroy $POOL3
log_must zpool create -d $POOL3 $DISK3
write_compressible $BACKDIR 32m
# One gzip-compressed source dataset per recordsize under test.
for recsize in $small $large; do
	log_must zfs create -o compress=gzip -o recsize=$recsize \
	    $TESTPOOL/$recsize
	dir=$(get_prop mountpoint $TESTPOOL/$recsize)
	log_must cp $BACKDIR/file.0 $dir/testfile
	log_must zfs snapshot $TESTPOOL/$recsize@snap
done
# Run tests for send streams without large blocks
for opts in '' -d -c -p -dp -L -dL -cp -cL -pL -dpL -cpL; do
	check $opts $small log_must
done
# -c into a features-disabled pool must be rejected.
for opts in -dc -dcp -dcL -dcpL; do
	check $opts $small log_mustnot
done
# Run tests for send streams with large blocks
for opts in '' -d -dp -c; do
	check $opts $large log_must
done
for opts in -dc -dL -dcp -dcL -dpL -dcpL; do
	check $opts $large log_mustnot
done
# Cases with non-default expected file blocksize / recsize prop
# (3rd and 4th positional args) — see the matrix in the header comment.
check -p $large log_must $small $large
check -L $large log_must $large $small
check -cp $large log_must $small $large
check -cL $large log_must $large $small
check -pL $large log_must $large $large
check -cpL $large log_must $large $large
log_pass "Compressed send works with datasets of varying recsize."

View File

@ -10,7 +10,7 @@
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
# Copyright (c) 2016 by Delphix. All rights reserved.
#
[global]
@ -24,7 +24,7 @@ thread=1
directory=/${TESTFS}
numjobs=${NUMJOBS}
filesize=${FILE_SIZE}
buffer_compress_percentage=33
buffer_compress_percentage=66
buffer_compress_chunk=4096
[job]

View File

@ -10,7 +10,7 @@
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
# Copyright (c) 2015, 2016 by Delphix. All rights reserved.
#
[global]
@ -29,7 +29,7 @@ bssplit=4k/50:8k/30:128k/10:1m/10
ioengine=psync
sync=${SYNC_TYPE}
numjobs=${NUMJOBS}
buffer_compress_percentage=33
buffer_compress_percentage=66
buffer_compress_chunk=4096
[job]

View File

@ -10,7 +10,7 @@
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
# Copyright (c) 2015, 2016 by Delphix. All rights reserved.
#
[global]
@ -27,7 +27,7 @@ ioengine=psync
sync=${SYNC_TYPE}
numjobs=${NUMJOBS}
filesize=${FILESIZE}
buffer_compress_percentage=33
buffer_compress_percentage=66
buffer_compress_chunk=4096
[job]

View File

@ -10,7 +10,7 @@
#
#
# Copyright (c) 2015 by Delphix. All rights reserved.
# Copyright (c) 2015, 2016 by Delphix. All rights reserved.
#
[global]
@ -27,7 +27,7 @@ ioengine=psync
sync=${SYNC_TYPE}
numjobs=${NUMJOBS}
filesize=${FILESIZE}
buffer_compress_percentage=33
buffer_compress_percentage=66
buffer_compress_chunk=4096
[job]