diff --git a/man/man5/zfs-module-parameters.5 b/man/man5/zfs-module-parameters.5
index e0000a23a1..321b6285ca 100644
--- a/man/man5/zfs-module-parameters.5
+++ b/man/man5/zfs-module-parameters.5
@@ -1379,17 +1379,6 @@ Max commit bytes to separate log device
 Default value: \fB1,048,576\fR.
 .RE
 
-.sp
-.ne 2
-.na
-\fBzio_bulk_flags\fR (int)
-.ad
-.RS 12n
-Additional flags to pass to bulk buffers
-.sp
-Default value: \fB0\fR.
-.RE
-
 .sp
 .ne 2
 .na
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index 7dcb420066..d904b30b21 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -57,7 +57,6 @@ kmem_cache_t *zio_cache;
 kmem_cache_t *zio_link_cache;
 kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
 kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
-int zio_bulk_flags = 0;
 int zio_delay_max = ZIO_DELAY_MAX;
 
 /*
@@ -142,6 +141,7 @@ zio_init(void)
 		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
 		size_t p2 = size;
 		size_t align = 0;
+		size_t cflags = (size > zio_buf_debug_limit) ? KMC_NODEBUG : 0;
 
 		while (p2 & (p2 - 1))
 			p2 &= p2 - 1;
@@ -166,16 +166,14 @@ zio_init(void)
 
 		if (align != 0) {
 			char name[36];
-			int flags = zio_bulk_flags;
-
 			(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
 			zio_buf_cache[c] = kmem_cache_create(name, size,
-			    align, NULL, NULL, NULL, NULL, NULL, flags);
+			    align, NULL, NULL, NULL, NULL, NULL, cflags);
 
 			(void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
 			zio_data_buf_cache[c] = kmem_cache_create(name, size,
 			    align, NULL, NULL, NULL, NULL,
-			    data_alloc_arena, flags);
+			    data_alloc_arena, cflags);
 		}
 	}
 
@@ -3398,9 +3396,6 @@ EXPORT_SYMBOL(zio_data_buf_alloc);
 EXPORT_SYMBOL(zio_buf_free);
 EXPORT_SYMBOL(zio_data_buf_free);
 
-module_param(zio_bulk_flags, int, 0644);
-MODULE_PARM_DESC(zio_bulk_flags, "Additional flags to pass to bulk buffers");
-
 module_param(zio_delay_max, int, 0644);
 MODULE_PARM_DESC(zio_delay_max, "Max zio millisec delay before posting event");
 
diff --git a/scripts/zpios-survey.sh b/scripts/zpios-survey.sh
index cb751b467e..78601695ff 100755
--- a/scripts/zpios-survey.sh
+++ b/scripts/zpios-survey.sh
@@ -120,40 +120,13 @@ zpios_survey_pending() {
 		tee -a ${ZPIOS_SURVEY_LOG}
 }
 
-# To avoid memory fragmentation issues our slab implementation can be
-# based on a virtual address space. Interestingly, we take a pretty
-# substantial performance penalty for this somewhere in the low level
-# IO drivers. If we back the slab with kmem pages we see far better
-# read performance numbers at the cost of memory fragmention and general
-# system instability due to large allocations. This may be because of
-# an optimization in the low level drivers due to the contigeous kmem
-# based memory. This needs to be explained. The good news here is that
-# with zerocopy interfaces added at the DMU layer we could gaurentee
-# kmem based memory for a pool of pages.
-#
-# 0x100 = KMC_KMEM - Force kmem_* based slab
-# 0x200 = KMC_VMEM - Force vmem_* based slab
-zpios_survey_kmem() {
-	TEST_NAME="${ZPOOL_CONFIG}+${ZPIOS_TEST}+kmem"
-	print_header ${TEST_NAME}
-
-	${ZFS_SH} ${VERBOSE_FLAG} \
-		zfs="zio_bulk_flags=0x100" | \
-		tee -a ${ZPIOS_SURVEY_LOG}
-	${ZPIOS_SH} ${VERBOSE_FLAG} -c ${ZPOOL_CONFIG} -t ${ZPIOS_TEST} | \
-		tee -a ${ZPIOS_SURVEY_LOG}
-	${ZFS_SH} -u ${VERBOSE_FLAG} | \
-		tee -a ${ZPIOS_SURVEY_LOG}
-}
-
 # Apply all possible turning concurrently to get a best case number
 zpios_survey_all() {
 	TEST_NAME="${ZPOOL_CONFIG}+${ZPIOS_TEST}+all"
 	print_header ${TEST_NAME}
 
 	${ZFS_SH} ${VERBOSE_FLAG} \
-		zfs="zfs_vdev_max_pending=1024" \
-		zfs="zio_bulk_flags=0x100" | \
+		zfs="zfs_vdev_max_pending=1024" | \
 		tee -a ${ZPIOS_SURVEY_LOG}
 	${ZPIOS_SH} ${VERBOSE_FLAG} -c ${ZPOOL_CONFIG} -t ${ZPIOS_TEST} \
 		-o "--noprefetch --zerocopy" \
@@ -209,7 +182,6 @@ zpios_survey_prefetch
 zpios_survey_zerocopy
 zpios_survey_checksum
 zpios_survey_pending
-zpios_survey_kmem
 zpios_survey_all
 
 exit 0
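
The functional heart of this change is the zio_init() hunk: instead of passing a
single user-tunable zio_bulk_flags value to every zio buffer cache, each size
class now computes its own creation flags, requesting KMC_NODEBUG once the
buffer size exceeds zio_buf_debug_limit so that slab debugging overhead is only
paid on the small caches. The standalone C sketch below mirrors that per-class
decision outside the kernel; the numeric values chosen for SPA_MINBLOCKSHIFT,
SPA_MAXBLOCKSIZE, KMC_NODEBUG, and the zio_buf_debug_limit default are
illustrative assumptions, not values taken from this diff.

	/*
	 * Standalone sketch of the per-cache flag selection that replaces
	 * the zio_bulk_flags tunable.  The constants mirror their ZFS/SPL
	 * counterparts but their values here are assumed for illustration.
	 */
	#include <stdio.h>

	#define SPA_MINBLOCKSHIFT	9		/* assumed: 512-byte minimum block */
	#define SPA_MAXBLOCKSIZE	(1 << 17)	/* assumed: 128K maximum block */
	#define KMC_NODEBUG		0x020		/* assumed placeholder flag value */

	/* Caches above this size skip slab debugging (assumed default). */
	static size_t zio_buf_debug_limit = 16384;

	int
	main(void)
	{
		size_t c;

		/* Same size-class walk as zio_init() in the hunk above. */
		for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
			size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
			size_t cflags = (size > zio_buf_debug_limit) ?
			    KMC_NODEBUG : 0;

			printf("zio_buf_%-6lu cflags=0x%lx\n",
			    (unsigned long)size, (unsigned long)cflags);
		}
		return (0);
	}

Run on its own, the sketch prints cflags=0x0 for the small size classes and the
KMC_NODEBUG bit for everything above the limit, which is the behavior the
deleted tunable used to approximate by hand. The KMC_KMEM/KMC_VMEM backing
choice that the removed zpios_survey_kmem test probed via zio_bulk_flags=0x100
is likewise taken out of user control and left to the slab implementation.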