From 419ba5914552c6185afbe1dd17b3ed4b0d526547 Mon Sep 17 00:00:00 2001
From: Serapheim Dimitropoulos
Date: Wed, 16 Jan 2019 15:06:20 -0800
Subject: [PATCH] Update vdev_is_spacemap_addressable() for new spacemap
 encoding

vdev_is_spacemap_addressable() used to check whether a vdev's offsets
fit in the 47-bit offset field of a single-word space map entry. Since
the new spacemap encoding was ported to ZoL, that's no longer a
limitation, so this patch updates vdev_is_spacemap_addressable()
accordingly.

It also updates the appropriate test to ensure that the same
functionality is tested. The test does so by creating pools that don't
have the new spacemap encoding enabled - just the checkpoint feature.
This patch also reorganizes that same test in order to cut its memory
consumption in half.

Reviewed by: Matt Ahrens
Reviewed-by: Brian Behlendorf
Signed-off-by: Serapheim Dimitropoulos
Closes #8286
---
 module/zfs/vdev.c                                  | 14 ++++--
 .../pool_checkpoint/checkpoint_sm_scale.ksh        | 46 +++++++++++++------
 2 files changed, 41 insertions(+), 19 deletions(-)

diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c
index 26ef5b4c57..cf49172d5f 100644
--- a/module/zfs/vdev.c
+++ b/module/zfs/vdev.c
@@ -3804,13 +3804,17 @@ vdev_get_child_stat_ex(vdev_t *cvd, vdev_stat_ex_t *vsx, vdev_stat_ex_t *cvsx)
 boolean_t
 vdev_is_spacemap_addressable(vdev_t *vd)
 {
+	if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_SPACEMAP_V2))
+		return (B_TRUE);
+
 	/*
-	 * Assuming 47 bits of the space map entry dedicated for the entry's
-	 * offset (see description in space_map.h), we calculate the maximum
-	 * address that can be described by a space map entry for the given
-	 * device.
+	 * If double-word space map entries are not enabled we assume
+	 * 47 bits of the space map entry are dedicated to the entry's
+	 * offset (see SM_OFFSET_BITS in space_map.h). We then use that
+	 * to calculate the maximum address that can be described by a
+	 * space map entry for the given device.
 	 */
-	uint64_t shift = vd->vdev_ashift + 47;
+	uint64_t shift = vd->vdev_ashift + SM_OFFSET_BITS;
 
 	if (shift >= 63) /* detect potential overflow */
 		return (B_TRUE);
diff --git a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_sm_scale.ksh b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_sm_scale.ksh
index 5247d60072..e24c4eb552 100755
--- a/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_sm_scale.ksh
+++ b/tests/zfs-tests/tests/functional/pool_checkpoint/checkpoint_sm_scale.ksh
@@ -12,21 +12,21 @@
 #
 
 #
-# Copyright (c) 2017 by Delphix. All rights reserved.
+# Copyright (c) 2017, 2018 by Delphix. All rights reserved.
 #
 
 . $STF_SUITE/tests/functional/pool_checkpoint/pool_checkpoint.kshlib
 
 #
 # DESCRIPTION:
-#	The maximum address that can be described by the current space
-#	map design (assuming the minimum 512-byte addressable storage)
-#	limits the maximum allocatable space of any top-level vdev to
-#	64PB whenever a vdev-wide space map is used.
+#	The maximum address that can be described by a single-word
+#	space map entry limits the maximum allocatable space of any
+#	top-level vdev to 64PB whenever a vdev-wide space map is used.
 #
 #	Since a vdev-wide space map is introduced for the checkpoint
-#	we want to ensure that we cannot checkpoint a pool that has a
-#	top-level vdev with more than 64PB of allocatable space.
+#	we want to ensure that we cannot checkpoint a pool that does
+#	not use the new space map encoding (V2) and has a top-level
+#	vdev with more than 64PB of allocatable space.
 #
 #	Note: Since this is a pool created from file-based vdevs we
 #	are guaranteed that vdev_ashift is SPA_MINBLOCKSHIFT
@@ -35,12 +35,25 @@
 #
 # STRATEGY:
 #	1. Create pool with a disk of exactly 64PB
-#	   (so ~63.5PB of allocatable space)
+#	   (so ~63.5PB of allocatable space) and
+#	   ensure that it has the checkpoint feature
+#	   enabled but not space map V2
 #	2. Ensure that you can checkpoint it
 #	3. Create pool with a disk of exactly 65PB
-#	   (so ~64.5PB of allocatable space)
+#	   (so ~64.5PB of allocatable space) with
+#	   the same setup
 #	4. Ensure we fail trying to checkpoint it
 #
+# Note:
+# This test used to create the two pools and attempt to checkpoint
+# them at the same time, then destroy them. We later had to change
+# this to test one pool at a time as the metaslabs (even though empty)
+# consumed a lot of memory, especially on a machine that has been
+# running with debug enabled. To give an example, each metaslab
+# structure is ~1712 bytes (at the time of this writing), and each
+# vdev has 128K metaslabs, which means that just the structures
+# consume 131071 * 1712 = ~224M.
+#
 
 verify_runnable "global"
 
@@ -65,10 +78,15 @@ log_must zfs create $DISKFS
 log_must mkfile -n $((64 * 1024 * 1024))g $DISK64PB
 log_must mkfile -n $((65 * 1024 * 1024))g $DISK65PB
 
-log_must zpool create $TESTPOOL1 $DISK64PB
-log_must zpool create $TESTPOOL2 $DISK65PB
-
+log_must zpool create -d $TESTPOOL1 $DISK64PB
+log_must zpool set feature@zpool_checkpoint=enabled $TESTPOOL1
 log_must zpool checkpoint $TESTPOOL1
-log_mustnot zpool checkpoint $TESTPOOL2
+destroy_pool $TESTPOOL1
 
-log_pass "Attempting to checkpoint a pool with a vdev that's more than 64PB."
+log_must zpool create -d $TESTPOOL2 $DISK65PB
+log_must zpool set feature@zpool_checkpoint=enabled $TESTPOOL2
+log_mustnot zpool checkpoint $TESTPOOL2
+destroy_pool $TESTPOOL2
+
+log_pass "Fail to checkpoint pool with old spacemap encoding" \
+    " and a vdev that's more than 64PB."
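
Editor's note: a worked example of the 64PB ceiling this patch is
about. The sketch below is not part of the patch; it is a standalone
hosted-C program that locally redefines SM_OFFSET_BITS and
SPA_MINBLOCKSHIFT (their values match the ZFS headers) and mirrors the
single-word branch of vdev_is_spacemap_addressable(): with 512-byte
sectors (ashift 9) and 47 offset bits per entry, the largest
describable address is 2^56 bytes = 64PiB, which is why the 65PB vdev
in the test must fail to checkpoint.

/* Standalone sketch; not kernel code. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define	SM_OFFSET_BITS		47	/* offset bits in a one-word entry */
#define	SPA_MINBLOCKSHIFT	9	/* 512-byte sectors */

int
main(void)
{
	uint64_t shift = SPA_MINBLOCKSHIFT + SM_OFFSET_BITS;

	/* Same overflow guard as vdev_is_spacemap_addressable(). */
	if (shift >= 63) {
		printf("shift %" PRIu64 ": any offset is addressable\n",
		    shift);
		return (0);
	}

	/*
	 * 2^56 bytes = 64PiB: the test's 64PB vdev (~63.5PB allocatable)
	 * fits under this limit, while the 65PB vdev (~64.5PB) does not.
	 */
	printf("max single-word-addressable offset: 2^%" PRIu64
	    " = %" PRIu64 " bytes\n", shift, (uint64_t)1 << shift);
	return (0);
}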
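
Editor's note: the memory figure in the test's new comment can be
sanity-checked the same way. Both constants below are quoted from the
note itself (~1712 bytes per metaslab structure, ~128K metaslabs per
vdev), not derived here; the result also shows why checkpointing the
pools one at a time, as the reorganized test does, halves the test's
peak memory footprint.

/* Standalone sketch of the note's arithmetic. */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t metaslab_size = 1712;		/* ~sizeof (metaslab_t) */
	uint64_t metaslab_count = 131071;	/* ~128K metaslabs per vdev */
	uint64_t bytes = metaslab_size * metaslab_count;

	/* 224393552 bytes, i.e. ~224M; two pools at once would double it. */
	printf("%llu bytes (~%lluM)\n", (unsigned long long)bytes,
	    (unsigned long long)(bytes / 1000000));
	return (0);
}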