diff --git a/include/sys/spa.h b/include/sys/spa.h
index 2ae467877d..a55dbd66d8 100644
--- a/include/sys/spa.h
+++ b/include/sys/spa.h
@@ -1075,6 +1075,7 @@ extern void spa_upgrade(spa_t *spa, uint64_t version);
 extern void spa_evict_all(void);
 extern vdev_t *spa_lookup_by_guid(spa_t *spa, uint64_t guid,
     boolean_t l2cache);
+extern boolean_t spa_has_l2cache(spa_t *, uint64_t guid);
 extern boolean_t spa_has_spare(spa_t *, uint64_t guid);
 extern uint64_t dva_get_dsize_sync(spa_t *spa, const dva_t *dva);
 extern uint64_t bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp);
diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index 1083b5a90d..7546e3e414 100644
--- a/module/zfs/spa.c
+++ b/module/zfs/spa.c
@@ -9451,12 +9451,11 @@ spa_upgrade(spa_t *spa, uint64_t version)
 	txg_wait_synced(spa_get_dsl(spa), 0);
 }
 
-boolean_t
-spa_has_spare(spa_t *spa, uint64_t guid)
+static boolean_t
+spa_has_aux_vdev(spa_t *spa, uint64_t guid, spa_aux_vdev_t *sav)
 {
 	int i;
-	uint64_t spareguid;
-	spa_aux_vdev_t *sav = &spa->spa_spares;
+	uint64_t vdev_guid;
 
 	for (i = 0; i < sav->sav_count; i++)
 		if (sav->sav_vdevs[i]->vdev_guid == guid)
@@ -9464,13 +9463,25 @@ spa_has_spare(spa_t *spa, uint64_t guid)
 
 	for (i = 0; i < sav->sav_npending; i++) {
 		if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
-		    &spareguid) == 0 && spareguid == guid)
+		    &vdev_guid) == 0 && vdev_guid == guid)
 			return (B_TRUE);
 	}
 
 	return (B_FALSE);
 }
 
+boolean_t
+spa_has_l2cache(spa_t *spa, uint64_t guid)
+{
+	return (spa_has_aux_vdev(spa, guid, &spa->spa_l2cache));
+}
+
+boolean_t
+spa_has_spare(spa_t *spa, uint64_t guid)
+{
+	return (spa_has_aux_vdev(spa, guid, &spa->spa_spares));
+}
+
 /*
  * Check if a pool has an active shared spare device.
  * Note: reference count of an active spare is 2, as a spare and as a replace
diff --git a/module/zfs/vdev_label.c b/module/zfs/vdev_label.c
index f03ae0873f..daf53f0a0c 100644
--- a/module/zfs/vdev_label.c
+++ b/module/zfs/vdev_label.c
@@ -933,7 +933,7 @@ vdev_inuse(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason,
 	/*
 	 * Check to see if this is a spare device. We do an explicit check for
 	 * spa_has_spare() here because it may be on our pending list of spares
-	 * to add. We also check if it is an l2cache device.
+	 * to add.
 	 */
 	if (spa_spare_exists(device_guid, &spare_pool, NULL) ||
 	    spa_has_spare(spa, device_guid)) {
@@ -942,7 +942,6 @@
 
 		switch (reason) {
 		case VDEV_LABEL_CREATE:
-		case VDEV_LABEL_L2CACHE:
 			return (B_TRUE);
 
 		case VDEV_LABEL_REPLACE:
@@ -959,8 +958,24 @@ vdev_inuse(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason,
 	/*
 	 * Check to see if this is an l2cache device.
 	 */
-	if (spa_l2cache_exists(device_guid, NULL))
-		return (B_TRUE);
+	if (spa_l2cache_exists(device_guid, NULL) ||
+	    spa_has_l2cache(spa, device_guid)) {
+		if (l2cache_guid)
+			*l2cache_guid = device_guid;
+
+		switch (reason) {
+		case VDEV_LABEL_CREATE:
+			return (B_TRUE);
+
+		case VDEV_LABEL_REPLACE:
+			return (!spa_has_l2cache(spa, device_guid));
+
+		case VDEV_LABEL_L2CACHE:
+			return (spa_has_l2cache(spa, device_guid));
+		default:
+			break;
+		}
+	}
 
 	/*
 	 * We can't rely on a pool's state if it's been imported
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_009_neg.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_009_neg.ksh
index 7ffe9512af..0a9fa868cb 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_009_neg.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_009_neg.ksh
@@ -39,8 +39,9 @@
 #
 # STRATEGY:
 # 1. Create a storage pool
-# 2. Add the two same devices to pool A
-# 3. Add the device in pool A to pool A again
+# 2. Add the device in pool A to pool A again
+# 3. For each vdev type, try to add two devices where one is already
+#    in pool A, and to add the same device twice
 #
 
 verify_runnable "global"
@@ -58,8 +59,13 @@
 log_onexit cleanup
 
 create_pool $TESTPOOL $DISK0
 log_must poolexists $TESTPOOL
-log_mustnot zpool add -f $TESTPOOL $DISK1 $DISK1
 log_mustnot zpool add -f $TESTPOOL $DISK0
+for type in "" "mirror" "raidz" "draid" "spare" "log" "dedup" "special" "cache"
+do
+	log_mustnot zpool add -f $TESTPOOL $type $DISK0 $DISK1
+	log_mustnot zpool add -f $TESTPOOL $type $DISK1 $DISK1
+done
+
 log_pass "'zpool add' get fail as expected if vdevs are the same or vdev is " \
 	"contained in the given pool."
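
Usage sketch (illustrative, not part of the patch): the duplicate-cache-device
case that the new spa_has_l2cache() pending-list check rejects. The pool and
device names below are hypothetical:

	# Naming the same cache device twice in one 'zpool add' could slip
	# past vdev_inuse(), because the first copy was only on the pending
	# l2cache list and not yet an active l2cache vdev.
	zpool create tank sda
	zpool add -f tank cache sdb sdb   # now fails: sdb already pending as l2cache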