/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting LAyer Tests (SPLAT) Kmem Tests.
\*****************************************************************************/

#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/vmem.h>
#include <sys/random.h>
#include <sys/thread.h>
#include <sys/vmsystm.h>
#include "splat-internal.h"

#define SPLAT_KMEM_NAME			"kmem"
#define SPLAT_KMEM_DESC			"Kernel Malloc/Slab Tests"

#define SPLAT_KMEM_TEST1_ID		0x0101
#define SPLAT_KMEM_TEST1_NAME		"kmem_alloc"
#define SPLAT_KMEM_TEST1_DESC		"Memory allocation test (kmem_alloc)"

#define SPLAT_KMEM_TEST2_ID		0x0102
#define SPLAT_KMEM_TEST2_NAME		"kmem_zalloc"
#define SPLAT_KMEM_TEST2_DESC		"Memory allocation test (kmem_zalloc)"

#define SPLAT_KMEM_TEST3_ID		0x0103
#define SPLAT_KMEM_TEST3_NAME		"vmem_alloc"
#define SPLAT_KMEM_TEST3_DESC		"Memory allocation test (vmem_alloc)"

#define SPLAT_KMEM_TEST4_ID		0x0104
#define SPLAT_KMEM_TEST4_NAME		"vmem_zalloc"
#define SPLAT_KMEM_TEST4_DESC		"Memory allocation test (vmem_zalloc)"

#define SPLAT_KMEM_TEST5_ID		0x0105
#define SPLAT_KMEM_TEST5_NAME		"slab_small"
#define SPLAT_KMEM_TEST5_DESC		"Slab ctor/dtor test (small)"

#define SPLAT_KMEM_TEST6_ID		0x0106
#define SPLAT_KMEM_TEST6_NAME		"slab_large"
#define SPLAT_KMEM_TEST6_DESC		"Slab ctor/dtor test (large)"

#define SPLAT_KMEM_TEST7_ID		0x0107
#define SPLAT_KMEM_TEST7_NAME		"slab_align"
#define SPLAT_KMEM_TEST7_DESC		"Slab alignment test"

#define SPLAT_KMEM_TEST8_ID		0x0108
#define SPLAT_KMEM_TEST8_NAME		"slab_reap"
#define SPLAT_KMEM_TEST8_DESC		"Slab reaping test"

#define SPLAT_KMEM_TEST9_ID		0x0109
#define SPLAT_KMEM_TEST9_NAME		"slab_age"
#define SPLAT_KMEM_TEST9_DESC		"Slab aging test"

#define SPLAT_KMEM_TEST10_ID		0x010a
#define SPLAT_KMEM_TEST10_NAME		"slab_lock"
#define SPLAT_KMEM_TEST10_DESC		"Slab locking test"

#if 0
#define SPLAT_KMEM_TEST11_ID		0x010b
#define SPLAT_KMEM_TEST11_NAME		"slab_overcommit"
#define SPLAT_KMEM_TEST11_DESC		"Slab memory overcommit test"
#endif

#define SPLAT_KMEM_TEST13_ID		0x010d
#define SPLAT_KMEM_TEST13_NAME		"slab_reclaim"
#define SPLAT_KMEM_TEST13_DESC		"Slab direct memory reclaim test"

#define SPLAT_KMEM_ALLOC_COUNT		10
#define SPLAT_VMEM_ALLOC_COUNT		10

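/*
 * Validate kmem_alloc() by allocating SPLAT_KMEM_ALLOC_COUNT buffers at
 * every power-of-two size up to spl_kmem_alloc_warn and verifying that
 * each request succeeds.
 */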
static int
splat_kmem_test1(struct file *file, void *arg)
{
	void *ptr[SPLAT_KMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, count, rc = 0;

	while ((!rc) && (size <= spl_kmem_alloc_warn)) {
		count = 0;

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
			ptr[i] = kmem_alloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				kmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST1_NAME,
		    "%d byte allocations, %d/%d successful\n",
		    size, count, SPLAT_KMEM_ALLOC_COUNT);
		if (count != SPLAT_KMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}

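/*
 * Validate kmem_zalloc() exactly as above, additionally checking that
 * every returned buffer has been zero filled.
 */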
static int
splat_kmem_test2(struct file *file, void *arg)
{
	void *ptr[SPLAT_KMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, j, count, rc = 0;

	while ((!rc) && (size <= spl_kmem_alloc_warn)) {
		count = 0;

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
			ptr[i] = kmem_zalloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		/* Ensure buffer has been zero filled */
		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++) {
			for (j = 0; j < size; j++) {
				if (((char *)ptr[i])[j] != '\0') {
					splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
					    "%d-byte allocation was "
					    "not zeroed\n", size);
					rc = -EFAULT;
				}
			}
		}

		for (i = 0; i < SPLAT_KMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				kmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST2_NAME,
		    "%d byte allocations, %d/%d successful\n",
		    size, count, SPLAT_KMEM_ALLOC_COUNT);
		if (count != SPLAT_KMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}

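/*
 * Validate vmem_alloc() over a size range which spans the kmem/vmem
 * allocation boundary (up to 4x spl_kmem_alloc_max).
 */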
static int
splat_kmem_test3(struct file *file, void *arg)
{
	void *ptr[SPLAT_VMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, count, rc = 0;

	/*
	 * Test up to 4x the maximum kmem_alloc() size to ensure both
	 * the kmem_alloc() and vmem_alloc() call paths are used.
	 */
	while ((!rc) && (size <= (4 * spl_kmem_alloc_max))) {
		count = 0;

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			ptr[i] = vmem_alloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				vmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST3_NAME,
		    "%d byte allocations, %d/%d successful\n",
		    size, count, SPLAT_VMEM_ALLOC_COUNT);
		if (count != SPLAT_VMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}

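/*
 * Validate vmem_zalloc() over the same size range as above and verify
 * that every returned buffer has been zero filled.
 */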
static int
splat_kmem_test4(struct file *file, void *arg)
{
	void *ptr[SPLAT_VMEM_ALLOC_COUNT];
	int size = PAGE_SIZE;
	int i, j, count, rc = 0;

	/*
	 * Test up to 4x the maximum kmem_zalloc() size to ensure both
	 * the kmem_zalloc() and vmem_zalloc() call paths are used.
	 */
	while ((!rc) && (size <= (4 * spl_kmem_alloc_max))) {
		count = 0;

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			ptr[i] = vmem_zalloc(size, KM_SLEEP);
			if (ptr[i])
				count++;
		}

		/* Ensure buffer has been zero filled */
		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++) {
			for (j = 0; j < size; j++) {
				if (((char *)ptr[i])[j] != '\0') {
					splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
					    "%d-byte allocation was "
					    "not zeroed\n", size);
					rc = -EFAULT;
				}
			}
		}

		for (i = 0; i < SPLAT_VMEM_ALLOC_COUNT; i++)
			if (ptr[i])
				vmem_free(ptr[i], size);

		splat_vprint(file, SPLAT_KMEM_TEST4_NAME,
		    "%d byte allocations, %d/%d successful\n",
		    size, count, SPLAT_VMEM_ALLOC_COUNT);
		if (count != SPLAT_VMEM_ALLOC_COUNT)
			rc = -ENOMEM;

		size *= 2;
	}

	return rc;
}

#define SPLAT_KMEM_TEST_MAGIC		0x004488CCUL
#define SPLAT_KMEM_CACHE_NAME		"kmem_test"
#define SPLAT_KMEM_OBJ_COUNT		1024
#define SPLAT_KMEM_OBJ_RECLAIM		32 /* objects */
#define SPLAT_KMEM_THREADS		32

#define KCP_FLAG_READY			0x01

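/*
 * Per-object test data; each object carries a magic value and is linked
 * onto its owning thread's kct_list via kcd_node.
 */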
typedef struct kmem_cache_data {
	unsigned long kcd_magic;
	struct list_head kcd_node;
	int kcd_flag;
	char kcd_buf[0];
} kmem_cache_data_t;

typedef struct kmem_cache_thread {
	spinlock_t kct_lock;
	int kct_id;
	struct list_head kct_list;
} kmem_cache_thread_t;

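/*
 * Shared state for a single cache test: the cache under test, the control
 * and worker wait queues, per-thread state, and the aggregate result.
 */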
typedef struct kmem_cache_priv {
	unsigned long kcp_magic;
	struct file *kcp_file;
	kmem_cache_t *kcp_cache;
	spinlock_t kcp_lock;
	wait_queue_head_t kcp_ctl_waitq;
	wait_queue_head_t kcp_thr_waitq;
	int kcp_flags;
	int kcp_kct_count;
	kmem_cache_thread_t *kcp_kct[SPLAT_KMEM_THREADS];
	int kcp_size;
	int kcp_align;
	int kcp_count;
	int kcp_alloc;
	int kcp_rc;
} kmem_cache_priv_t;

static kmem_cache_priv_t *
splat_kmem_cache_test_kcp_alloc(struct file *file, char *name,
    int size, int align, int alloc)
{
	kmem_cache_priv_t *kcp;

	kcp = kmem_zalloc(sizeof(kmem_cache_priv_t), KM_SLEEP);
	if (!kcp)
		return NULL;

	kcp->kcp_magic = SPLAT_KMEM_TEST_MAGIC;
	kcp->kcp_file = file;
	kcp->kcp_cache = NULL;
	spin_lock_init(&kcp->kcp_lock);
	init_waitqueue_head(&kcp->kcp_ctl_waitq);
	init_waitqueue_head(&kcp->kcp_thr_waitq);
	kcp->kcp_flags = 0;
	kcp->kcp_kct_count = -1;
	kcp->kcp_size = size;
	kcp->kcp_align = align;
	kcp->kcp_count = 0;
	kcp->kcp_alloc = alloc;
	kcp->kcp_rc = 0;

	return kcp;
}

static void
splat_kmem_cache_test_kcp_free(kmem_cache_priv_t *kcp)
{
	kmem_free(kcp, sizeof(kmem_cache_priv_t));
}

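/*
 * Allocate, initialize, and register the per-thread state for the thread
 * with the given id.
 */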
static kmem_cache_thread_t *
splat_kmem_cache_test_kct_alloc(kmem_cache_priv_t *kcp, int id)
{
	kmem_cache_thread_t *kct;

	ASSERT3S(id, <, SPLAT_KMEM_THREADS);
	ASSERT(kcp->kcp_kct[id] == NULL);

	kct = kmem_zalloc(sizeof(kmem_cache_thread_t), KM_SLEEP);
	if (!kct)
		return NULL;

	spin_lock_init(&kct->kct_lock);
	kct->kct_id = id;
	INIT_LIST_HEAD(&kct->kct_list);

	spin_lock(&kcp->kcp_lock);
	kcp->kcp_kct[id] = kct;
	spin_unlock(&kcp->kcp_lock);

	return kct;
}

static void
splat_kmem_cache_test_kct_free(kmem_cache_priv_t *kcp,
    kmem_cache_thread_t *kct)
{
	spin_lock(&kcp->kcp_lock);
	kcp->kcp_kct[kct->kct_id] = NULL;
	spin_unlock(&kcp->kcp_lock);

	kmem_free(kct, sizeof(kmem_cache_thread_t));
}

static void
splat_kmem_cache_test_kcd_free(kmem_cache_priv_t *kcp,
    kmem_cache_thread_t *kct)
{
	kmem_cache_data_t *kcd;

	spin_lock(&kct->kct_lock);
	while (!list_empty(&kct->kct_list)) {
		kcd = list_entry(kct->kct_list.next,
		    kmem_cache_data_t, kcd_node);
		list_del(&kcd->kcd_node);
		spin_unlock(&kct->kct_lock);

		kmem_cache_free(kcp->kcp_cache, kcd);

		spin_lock(&kct->kct_lock);
	}
	spin_unlock(&kct->kct_lock);
}

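/*
 * Allocate 'count' objects from the cache and queue them on the thread's
 * list; on failure free everything already allocated for this thread.
 */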
static int
splat_kmem_cache_test_kcd_alloc(kmem_cache_priv_t *kcp,
    kmem_cache_thread_t *kct, int count)
{
	kmem_cache_data_t *kcd;
	int i;

	for (i = 0; i < count; i++) {
		kcd = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
		if (kcd == NULL) {
			splat_kmem_cache_test_kcd_free(kcp, kct);
			return -ENOMEM;
		}

		spin_lock(&kct->kct_lock);
		list_add_tail(&kcd->kcd_node, &kct->kct_list);
		spin_unlock(&kct->kct_lock);
	}

	return 0;
}

static void
splat_kmem_cache_test_debug(struct file *file, char *name,
    kmem_cache_priv_t *kcp)
{
	int j;

	splat_vprint(file, name, "%s cache objects %d",
	    kcp->kcp_cache->skc_name, kcp->kcp_count);

	if (kcp->kcp_cache->skc_flags & (KMC_KMEM | KMC_VMEM)) {
		splat_vprint(file, name, ", slabs %u/%u objs %u/%u",
		    (unsigned)kcp->kcp_cache->skc_slab_alloc,
		    (unsigned)kcp->kcp_cache->skc_slab_total,
		    (unsigned)kcp->kcp_cache->skc_obj_alloc,
		    (unsigned)kcp->kcp_cache->skc_obj_total);

		if (!(kcp->kcp_cache->skc_flags & KMC_NOMAGAZINE)) {
			splat_vprint(file, name, "%s", "mags");

			for_each_online_cpu(j)
				splat_print(file, "%u/%u ",
				    kcp->kcp_cache->skc_mag[j]->skm_avail,
				    kcp->kcp_cache->skc_mag[j]->skm_size);
		}
	}

	splat_print(file, "%s\n", "");
}

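/*
 * Object constructor and destructor callbacks; they stamp (and clear) the
 * magic value and fill the payload with a known pattern so corruption can
 * be detected, while tracking the live object count in the private data.
 */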
static int
splat_kmem_cache_test_constructor(void *ptr, void *priv, int flags)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
	kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

	if (kcd && kcp) {
		kcd->kcd_magic = kcp->kcp_magic;
		INIT_LIST_HEAD(&kcd->kcd_node);
		kcd->kcd_flag = 1;
		memset(kcd->kcd_buf, 0xaa, kcp->kcp_size - (sizeof *kcd));
		kcp->kcp_count++;
	}

	return 0;
}

static void
splat_kmem_cache_test_destructor(void *ptr, void *priv)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
	kmem_cache_data_t *kcd = (kmem_cache_data_t *)ptr;

	if (kcd && kcp) {
		kcd->kcd_magic = 0;
		kcd->kcd_flag = 0;
		memset(kcd->kcd_buf, 0xbb, kcp->kcp_size - (sizeof *kcd));
		kcp->kcp_count--;
	}

	return;
}

/*
 * Generic reclaim function which assumes that all objects may
 * be reclaimed at any time.  We free a small percentage of the
 * objects linked off the kcp or kct[] every time we are called.
 */
static void
splat_kmem_cache_test_reclaim(void *priv)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)priv;
	kmem_cache_thread_t *kct;
	kmem_cache_data_t *kcd;
	LIST_HEAD(reclaim);
	int i, count;

	ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);

	/* For each kct thread reclaim some objects */
	spin_lock(&kcp->kcp_lock);
	for (i = 0; i < SPLAT_KMEM_THREADS; i++) {
		kct = kcp->kcp_kct[i];
		if (!kct)
			continue;

		spin_unlock(&kcp->kcp_lock);
		spin_lock(&kct->kct_lock);

		count = SPLAT_KMEM_OBJ_RECLAIM;
		while (count > 0 && !list_empty(&kct->kct_list)) {
			kcd = list_entry(kct->kct_list.next,
			    kmem_cache_data_t, kcd_node);
			list_del(&kcd->kcd_node);
			list_add(&kcd->kcd_node, &reclaim);
			count--;
		}

		spin_unlock(&kct->kct_lock);
		spin_lock(&kcp->kcp_lock);
	}
	spin_unlock(&kcp->kcp_lock);

	/* Freed outside the spin lock */
	while (!list_empty(&reclaim)) {
		kcd = list_entry(reclaim.next, kmem_cache_data_t, kcd_node);
		list_del(&kcd->kcd_node);
		kmem_cache_free(kcp->kcp_cache, kcd);
	}

	return;
}

static int
splat_kmem_cache_test_threads(kmem_cache_priv_t *kcp, int threads)
{
	int rc;

	spin_lock(&kcp->kcp_lock);
	rc = (kcp->kcp_kct_count == threads);
	spin_unlock(&kcp->kcp_lock);

	return rc;
}

static int
splat_kmem_cache_test_flags(kmem_cache_priv_t *kcp, int flags)
{
	int rc;

	spin_lock(&kcp->kcp_lock);
	rc = (kcp->kcp_flags & flags);
	spin_unlock(&kcp->kcp_lock);

	return rc;
}

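/*
 * Worker thread body: claim a thread id, wait for the control thread to
 * signal readiness, then allocate and free kcp_alloc objects, recording
 * the first error encountered in kcp_rc.
 */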
static void
splat_kmem_cache_test_thread(void *arg)
{
	kmem_cache_priv_t *kcp = (kmem_cache_priv_t *)arg;
	kmem_cache_thread_t *kct;
	int rc = 0, id;

	ASSERT(kcp->kcp_magic == SPLAT_KMEM_TEST_MAGIC);

	/* Assign thread ids */
	spin_lock(&kcp->kcp_lock);
	if (kcp->kcp_kct_count == -1)
		kcp->kcp_kct_count = 0;

	id = kcp->kcp_kct_count;
	kcp->kcp_kct_count++;
	spin_unlock(&kcp->kcp_lock);

	kct = splat_kmem_cache_test_kct_alloc(kcp, id);
	if (!kct) {
		rc = -ENOMEM;
		goto out;
	}

	/* Wait for all threads to have started and report they are ready */
	if (kcp->kcp_kct_count == SPLAT_KMEM_THREADS)
		wake_up(&kcp->kcp_ctl_waitq);

	wait_event(kcp->kcp_thr_waitq,
	    splat_kmem_cache_test_flags(kcp, KCP_FLAG_READY));

	/* Create and destroy objects */
	rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, kcp->kcp_alloc);
	splat_kmem_cache_test_kcd_free(kcp, kct);
out:
	if (kct)
		splat_kmem_cache_test_kct_free(kcp, kct);

	spin_lock(&kcp->kcp_lock);
	if (!kcp->kcp_rc)
		kcp->kcp_rc = rc;

	if ((--kcp->kcp_kct_count) == 0)
		wake_up(&kcp->kcp_ctl_waitq);

	spin_unlock(&kcp->kcp_lock);

	thread_exit();
}

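/*
 * Create a cache with the requested object size, alignment, and flags,
 * then verify its basic functionality by allocating and freeing several
 * slabs worth of objects.
 */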
static int
|
2009-01-26 17:02:04 +00:00
|
|
|
splat_kmem_cache_test(struct file *file, void *arg, char *name,
|
Refine slab cache sizing
This change is designed to improve the memory utilization of
slabs by more carefully setting their size. The way the code
currently works is problematic for slabs which contain large
objects (>1MB). This is due to slabs being unconditionally
rounded up to a power of two which may result in unused space
at the end of the slab.
The reason the existing code rounds up every slab is because it
assumes it will backed by the buddy allocator. Since the buddy
allocator can only performs power of two allocations this is
desirable because it avoids wasting any space. However, this
logic breaks down if slab is backed by vmalloc() which operates
at a page level granularity. In this case, the optimal thing to
do is calculate the minimum required slab size given certain
constraints (object size, alignment, objects/slab, etc).
Therefore, this patch reworks the spl_slab_size() function so
that it sizes KMC_KMEM slabs differently than KMC_VMEM slabs.
KMC_KMEM slabs are rounded up to the nearest power of two, and
KMC_VMEM slabs are allowed to be the minimum required size.
This change also reduces the default number of objects per slab.
This reduces how much memory a single cache object can pin, which
can result in significant memory saving for highly fragmented
caches. But depending on the workload it may result in slabs
being allocated and freed more frequently. In practice, this
has been shown to be a better default for most workloads.
Also the maximum slab size has been reduced to 4MB on 32-bit
systems. Due to the limited virtual address space it's critical
the we be as frugal as possible. A limit of 4M still lets us
reasonably comfortably allocate a limited number of 1MB objects.
Finally, the kmem:slab_small and kmem:slab_large SPLAT tests
were extended to provide better test coverage of various object
sizes and alignments. Caches are created with random parameters
and their basic functionality is verified by allocating several
slabs worth of objects.
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
2014-12-15 22:06:18 +00:00
|
|
|
int size, int align, int flags)
|
2008-02-26 20:36:04 +00:00
|
|
|
{
|
Refine slab cache sizing
This change is designed to improve the memory utilization of
slabs by more carefully setting their size. The way the code
currently works is problematic for slabs which contain large
objects (>1MB). This is due to slabs being unconditionally
rounded up to a power of two which may result in unused space
at the end of the slab.
The reason the existing code rounds up every slab is because it
assumes it will backed by the buddy allocator. Since the buddy
allocator can only performs power of two allocations this is
desirable because it avoids wasting any space. However, this
logic breaks down if slab is backed by vmalloc() which operates
at a page level granularity. In this case, the optimal thing to
do is calculate the minimum required slab size given certain
constraints (object size, alignment, objects/slab, etc).
Therefore, this patch reworks the spl_slab_size() function so
that it sizes KMC_KMEM slabs differently than KMC_VMEM slabs.
KMC_KMEM slabs are rounded up to the nearest power of two, and
KMC_VMEM slabs are allowed to be the minimum required size.
This change also reduces the default number of objects per slab.
This reduces how much memory a single cache object can pin, which
can result in significant memory saving for highly fragmented
caches. But depending on the workload it may result in slabs
being allocated and freed more frequently. In practice, this
has been shown to be a better default for most workloads.
Also the maximum slab size has been reduced to 4MB on 32-bit
systems. Due to the limited virtual address space it's critical
the we be as frugal as possible. A limit of 4M still lets us
reasonably comfortably allocate a limited number of 1MB objects.
Finally, the kmem:slab_small and kmem:slab_large SPLAT tests
were extended to provide better test coverage of various object
sizes and alignments. Caches are created with random parameters
and their basic functionality is verified by allocating several
slabs worth of objects.
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
2014-12-15 22:06:18 +00:00
|
|
|
kmem_cache_priv_t *kcp = NULL;
|
|
|
|
kmem_cache_data_t **kcd = NULL;
|
|
|
|
int i, rc = 0, objs = 0;
|
|
|
|
|
2015-11-16 22:45:42 +00:00
|
|
|
/* Limit size for low memory machines (1/128 of memory) */
|
|
|
|
size = MIN(size, (physmem * PAGE_SIZE) >> 7);
|
|
|
|
|
Refine slab cache sizing
This change is designed to improve the memory utilization of
slabs by more carefully setting their size. The way the code
currently works is problematic for slabs which contain large
objects (>1MB). This is due to slabs being unconditionally
rounded up to a power of two which may result in unused space
at the end of the slab.
The reason the existing code rounds up every slab is because it
assumes it will backed by the buddy allocator. Since the buddy
allocator can only performs power of two allocations this is
desirable because it avoids wasting any space. However, this
logic breaks down if slab is backed by vmalloc() which operates
at a page level granularity. In this case, the optimal thing to
do is calculate the minimum required slab size given certain
constraints (object size, alignment, objects/slab, etc).
Therefore, this patch reworks the spl_slab_size() function so
that it sizes KMC_KMEM slabs differently than KMC_VMEM slabs.
KMC_KMEM slabs are rounded up to the nearest power of two, and
KMC_VMEM slabs are allowed to be the minimum required size.
This change also reduces the default number of objects per slab.
This reduces how much memory a single cache object can pin, which
can result in significant memory saving for highly fragmented
caches. But depending on the workload it may result in slabs
being allocated and freed more frequently. In practice, this
has been shown to be a better default for most workloads.
Also the maximum slab size has been reduced to 4MB on 32-bit
systems. Due to the limited virtual address space it's critical
the we be as frugal as possible. A limit of 4M still lets us
reasonably comfortably allocate a limited number of 1MB objects.
Finally, the kmem:slab_small and kmem:slab_large SPLAT tests
were extended to provide better test coverage of various object
sizes and alignments. Caches are created with random parameters
and their basic functionality is verified by allocating several
slabs worth of objects.
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
2014-12-15 22:06:18 +00:00
|
|
|
splat_vprint(file, name,
|
|
|
|
"Testing size=%d, align=%d, flags=0x%04x\n",
|
|
|
|
size, align, flags);
|
2008-02-26 20:36:04 +00:00
|
|
|
|
2012-08-26 20:34:06 +00:00
|
|
|
kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, align, 0);
|
2009-01-31 04:54:49 +00:00
|
|
|
if (!kcp) {
|
|
|
|
splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
|
Refine slab cache sizing
This change is designed to improve the memory utilization of
slabs by more carefully setting their size. The way the code
currently works is problematic for slabs which contain large
objects (>1MB). This is due to slabs being unconditionally
rounded up to a power of two which may result in unused space
at the end of the slab.
The reason the existing code rounds up every slab is because it
assumes it will backed by the buddy allocator. Since the buddy
allocator can only performs power of two allocations this is
desirable because it avoids wasting any space. However, this
logic breaks down if slab is backed by vmalloc() which operates
at a page level granularity. In this case, the optimal thing to
do is calculate the minimum required slab size given certain
constraints (object size, alignment, objects/slab, etc).
Therefore, this patch reworks the spl_slab_size() function so
that it sizes KMC_KMEM slabs differently than KMC_VMEM slabs.
KMC_KMEM slabs are rounded up to the nearest power of two, and
KMC_VMEM slabs are allowed to be the minimum required size.
This change also reduces the default number of objects per slab.
This reduces how much memory a single cache object can pin, which
can result in significant memory saving for highly fragmented
caches. But depending on the workload it may result in slabs
being allocated and freed more frequently. In practice, this
has been shown to be a better default for most workloads.
Also the maximum slab size has been reduced to 4MB on 32-bit
systems. Due to the limited virtual address space it's critical
the we be as frugal as possible. A limit of 4M still lets us
reasonably comfortably allocate a limited number of 1MB objects.
Finally, the kmem:slab_small and kmem:slab_large SPLAT tests
were extended to provide better test coverage of various object
sizes and alignments. Caches are created with random parameters
and their basic functionality is verified by allocating several
slabs worth of objects.
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
2014-12-15 22:06:18 +00:00
|
|
|
return (-ENOMEM);
|
2009-01-31 04:54:49 +00:00
|
|
|
}
|
|
|
|
|
Refine slab cache sizing
This change is designed to improve the memory utilization of
slabs by more carefully setting their size. The way the code
currently works is problematic for slabs which contain large
objects (>1MB). This is due to slabs being unconditionally
rounded up to a power of two which may result in unused space
at the end of the slab.
The reason the existing code rounds up every slab is because it
assumes it will backed by the buddy allocator. Since the buddy
allocator can only performs power of two allocations this is
desirable because it avoids wasting any space. However, this
logic breaks down if slab is backed by vmalloc() which operates
at a page level granularity. In this case, the optimal thing to
do is calculate the minimum required slab size given certain
constraints (object size, alignment, objects/slab, etc).
Therefore, this patch reworks the spl_slab_size() function so
that it sizes KMC_KMEM slabs differently than KMC_VMEM slabs.
KMC_KMEM slabs are rounded up to the nearest power of two, and
KMC_VMEM slabs are allowed to be the minimum required size.
This change also reduces the default number of objects per slab.
This reduces how much memory a single cache object can pin, which
can result in significant memory saving for highly fragmented
caches. But depending on the workload it may result in slabs
being allocated and freed more frequently. In practice, this
has been shown to be a better default for most workloads.
Also the maximum slab size has been reduced to 4MB on 32-bit
systems. Due to the limited virtual address space it's critical
the we be as frugal as possible. A limit of 4M still lets us
reasonably comfortably allocate a limited number of 1MB objects.
Finally, the kmem:slab_small and kmem:slab_large SPLAT tests
were extended to provide better test coverage of various object
sizes and alignments. Caches are created with random parameters
and their basic functionality is verified by allocating several
slabs worth of objects.
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
2014-12-15 22:06:18 +00:00
|
|
|
kcp->kcp_cache = kmem_cache_create(SPLAT_KMEM_CACHE_NAME,
|
|
|
|
kcp->kcp_size, kcp->kcp_align,
|
|
|
|
splat_kmem_cache_test_constructor,
|
|
|
|
splat_kmem_cache_test_destructor,
|
|
|
|
NULL, kcp, NULL, flags);
|
|
|
|
if (kcp->kcp_cache == NULL) {
|
|
|
|
splat_vprint(file, name, "Unable to create "
|
|
|
|
"name='%s', size=%d, align=%d, flags=0x%x\n",
|
|
|
|
SPLAT_KMEM_CACHE_NAME, size, align, flags);
|
2009-01-31 04:54:49 +00:00
|
|
|
rc = -ENOMEM;
|
|
|
|
goto out_free;
|
2008-02-26 20:36:04 +00:00
|
|
|
}
|
|
|
|
|
Refine slab cache sizing
This change is designed to improve the memory utilization of
slabs by more carefully setting their size. The way the code
currently works is problematic for slabs which contain large
objects (>1MB). This is due to slabs being unconditionally
rounded up to a power of two which may result in unused space
at the end of the slab.
The reason the existing code rounds up every slab is because it
assumes it will backed by the buddy allocator. Since the buddy
allocator can only performs power of two allocations this is
desirable because it avoids wasting any space. However, this
logic breaks down if slab is backed by vmalloc() which operates
at a page level granularity. In this case, the optimal thing to
do is calculate the minimum required slab size given certain
constraints (object size, alignment, objects/slab, etc).
Therefore, this patch reworks the spl_slab_size() function so
that it sizes KMC_KMEM slabs differently than KMC_VMEM slabs.
KMC_KMEM slabs are rounded up to the nearest power of two, and
KMC_VMEM slabs are allowed to be the minimum required size.
This change also reduces the default number of objects per slab.
This reduces how much memory a single cache object can pin, which
can result in significant memory saving for highly fragmented
caches. But depending on the workload it may result in slabs
being allocated and freed more frequently. In practice, this
has been shown to be a better default for most workloads.
Also the maximum slab size has been reduced to 4MB on 32-bit
systems. Due to the limited virtual address space it's critical
the we be as frugal as possible. A limit of 4M still lets us
reasonably comfortably allocate a limited number of 1MB objects.
Finally, the kmem:slab_small and kmem:slab_large SPLAT tests
were extended to provide better test coverage of various object
sizes and alignments. Caches are created with random parameters
and their basic functionality is verified by allocating several
slabs worth of objects.
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
2014-12-15 22:06:18 +00:00
|
|
|
        /*
         * Allocate several slabs worth of objects to verify functionality.
         * However, on 32-bit systems with limited address space constrain
         * it to a single slab for the purposes of this test.
         */
#ifdef _LP64
        objs = kcp->kcp_cache->skc_slab_objs * 4;
#else
        objs = 1;
#endif

        kcd = kmem_zalloc(sizeof (kmem_cache_data_t *) * objs, KM_SLEEP);
        if (kcd == NULL) {
                splat_vprint(file, name, "Unable to allocate pointers "
                    "for %d objects\n", objs);
                rc = -ENOMEM;
                goto out_free;
        }
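        /*
         * Allocate every object and verify the constructor ran for each
         * one: kcd_flag must be set and kcd_magic must match the private
         * data registered with the cache.
         */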
        for (i = 0; i < objs; i++) {
                kcd[i] = kmem_cache_alloc(kcp->kcp_cache, KM_SLEEP);
                if (kcd[i] == NULL) {
                        splat_vprint(file, name, "Unable to allocate "
                            "from '%s'\n", SPLAT_KMEM_CACHE_NAME);
                        rc = -EINVAL;
                        goto out_free;
                }

                if (!kcd[i]->kcd_flag) {
                        splat_vprint(file, name, "Failed to run constructor "
                            "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
                        rc = -EINVAL;
                        goto out_free;
                }

                if (kcd[i]->kcd_magic != kcp->kcp_magic) {
                        splat_vprint(file, name,
                            "Failed to pass private data to constructor "
                            "for '%s'\n", SPLAT_KMEM_CACHE_NAME);
                        rc = -EINVAL;
                        goto out_free;
                }
        }
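        /*
         * Free every object; each kmem_cache_free() must invoke the
         * destructor, which is expected to clear kcd_flag.
         */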
        for (i = 0; i < objs; i++) {
                kmem_cache_free(kcp->kcp_cache, kcd[i]);

                /* Destructors are run for every kmem_cache_free() */
                if (kcd[i]->kcd_flag) {
                        splat_vprint(file, name,
                            "Failed to run destructor for '%s'\n",
                            SPLAT_KMEM_CACHE_NAME);
                        rc = -EINVAL;
                        goto out_free;
                }
        }
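        /*
         * By this point every destructor should have run, so any
         * remaining count indicates objects whose destructor was skipped.
         */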
        if (kcp->kcp_count) {
                splat_vprint(file, name,
                    "Failed to run destructor on all slab objects for '%s'\n",
                    SPLAT_KMEM_CACHE_NAME);
                rc = -EINVAL;
        }
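        /* Success: release the pointer array, the cache, and the kcp. */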
        kmem_free(kcd, sizeof (kmem_cache_data_t *) * objs);
        kmem_cache_destroy(kcp->kcp_cache);

        splat_kmem_cache_test_kcp_free(kcp);
        splat_vprint(file, name,
            "Success ran alloc'd/free'd %d objects of size %d\n",
            objs, size);

        return (rc);
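        /*
         * Error path: free any objects which were successfully allocated,
         * then release the pointer array, the cache, and the kcp.
         */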
out_free:
        if (kcd) {
                for (i = 0; i < objs; i++) {
                        if (kcd[i] != NULL)
                                kmem_cache_free(kcp->kcp_cache, kcd[i]);
                }

                kmem_free(kcd, sizeof (kmem_cache_data_t *) * objs);
        }

        if (kcp->kcp_cache)
                kmem_cache_destroy(kcp->kcp_cache);

        splat_kmem_cache_test_kcp_free(kcp);

        return (rc);
}
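/*
 * Spawn SPLAT_KMEM_THREADS kernel threads which concurrently exercise a
 * shared cache (see splat_kmem_cache_test_thread()), report the resulting
 * slab and object usage, and fail with -ETIME if the run takes longer
 * than max_time seconds.
 */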
static int
splat_kmem_cache_thread_test(struct file *file, void *arg, char *name,
    int size, int alloc, int max_time)
{
        kmem_cache_priv_t *kcp;
        kthread_t *thr;
        struct timespec start, stop, delta;
        char cache_name[32];
        int i, rc = 0;

        kcp = splat_kmem_cache_test_kcp_alloc(file, name, size, 0, alloc);
        if (!kcp) {
                splat_vprint(file, name, "Unable to create '%s'\n", "kcp");
                return -ENOMEM;
        }

        (void)snprintf(cache_name, 32, "%s-%d-%d",
            SPLAT_KMEM_CACHE_NAME, size, alloc);
        kcp->kcp_cache =
            kmem_cache_create(cache_name, kcp->kcp_size, 0,
            splat_kmem_cache_test_constructor,
            splat_kmem_cache_test_destructor,
            splat_kmem_cache_test_reclaim,
            kcp, NULL, 0);
        if (!kcp->kcp_cache) {
                splat_vprint(file, name, "Unable to create '%s'\n", cache_name);
                rc = -ENOMEM;
                goto out_kcp;
        }

        getnstimeofday(&start);

        for (i = 0; i < SPLAT_KMEM_THREADS; i++) {
                thr = thread_create(NULL, 0,
                    splat_kmem_cache_test_thread,
                    kcp, 0, &p0, TS_RUN, defclsyspri);
                if (thr == NULL) {
                        rc = -ESRCH;
                        goto out_cache;
                }
        }

        /*
         * Sleep until all threads have started, then set the ready
         * flag and wake them all up for maximum concurrency.
         */
        wait_event(kcp->kcp_ctl_waitq,
            splat_kmem_cache_test_threads(kcp, SPLAT_KMEM_THREADS));

        spin_lock(&kcp->kcp_lock);
        kcp->kcp_flags |= KCP_FLAG_READY;
        spin_unlock(&kcp->kcp_lock);
        wake_up_all(&kcp->kcp_thr_waitq);

        /* Sleep until all threads have finished */
        wait_event(kcp->kcp_ctl_waitq, splat_kmem_cache_test_threads(kcp, 0));

        getnstimeofday(&stop);
        delta = timespec_sub(stop, start);
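        /*
         * Report the cache name, elapsed wall-clock time, and slab/object
         * usage as total/max/target triples, where the target values are
         * derived from kcp_alloc and the thread count.
         */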
        splat_vprint(file, name,
            "%-22s %2ld.%09ld\t"
            "%lu/%lu/%lu\t%lu/%lu/%lu\n",
            kcp->kcp_cache->skc_name,
            delta.tv_sec, delta.tv_nsec,
            (unsigned long)kcp->kcp_cache->skc_slab_total,
            (unsigned long)kcp->kcp_cache->skc_slab_max,
            (unsigned long)(kcp->kcp_alloc *
            SPLAT_KMEM_THREADS /
            SPL_KMEM_CACHE_OBJ_PER_SLAB),
            (unsigned long)kcp->kcp_cache->skc_obj_total,
            (unsigned long)kcp->kcp_cache->skc_obj_max,
            (unsigned long)(kcp->kcp_alloc *
            SPLAT_KMEM_THREADS));

        if (delta.tv_sec >= max_time)
                rc = -ETIME;

        if (!rc && kcp->kcp_rc)
                rc = kcp->kcp_rc;

out_cache:
        kmem_cache_destroy(kcp->kcp_cache);
out_kcp:
        splat_kmem_cache_test_kcp_free(kcp);

        return rc;
}
/*
 * Validate small object cache behavior for dynamic/kmem/vmem caches.
 * Caches are created with random parameters (object size, alignment,
 * and backing flags) and their basic functionality is verified by
 * allocating several slabs worth of objects.
 */
static int
splat_kmem_test5(struct file *file, void *arg)
{
        char *name = SPLAT_KMEM_TEST5_NAME;
        int i, rc = 0;

        /* Randomly pick small object sizes and alignments. */
        for (i = 0; i < 100; i++) {
                int size, align, flags = 0;
                uint32_t rnd;

                /* Evenly distribute tests over all valid cache types */
                get_random_bytes((void *)&rnd, sizeof (uint32_t));
                switch (rnd & 0x03) {
                default:
                case 0x00:
                        flags = 0;
                        break;
                case 0x01:
                        flags = KMC_KMEM;
                        break;
                case 0x02:
                        flags = KMC_VMEM;
                        break;
                case 0x03:
                        flags = KMC_SLAB;
                        break;
                }

                /* The following flags are set with a 1/10 chance */
                flags |= ((((rnd >> 8) % 10) == 0) ? KMC_OFFSLAB : 0);
                flags |= ((((rnd >> 16) % 10) == 0) ? KMC_NOEMERGENCY : 0);

                /* Object size: 32b - PAGE_SIZE */
                get_random_bytes((void *)&rnd, sizeof (uint32_t));
                size = MAX(rnd % (PAGE_SIZE + 1), 32);

                /* Alignment: 2^N where (3 <= N <= PAGE_SHIFT) */
                get_random_bytes((void *)&rnd, sizeof (uint32_t));
                align = (1 << MAX(3, rnd % (PAGE_SHIFT + 1)));

                rc = splat_kmem_cache_test(file, arg, name, size, align, flags);
                if (rc)
                        return (rc);
        }

        return (rc);
}
/*
 * Validate large object cache behavior for dynamic/kmem/vmem caches.
 * Caches are created with random parameters (object size, alignment,
 * and backing flags) and their basic functionality is verified by
 * allocating several slabs worth of objects.
 */
static int
splat_kmem_test6(struct file *file, void *arg)
{
        char *name = SPLAT_KMEM_TEST6_NAME;
        int i, max_size, rc = 0;

        /* Randomly pick large object sizes and alignments. */
        for (i = 0; i < 100; i++) {
                int size, align, flags = 0;
                uint32_t rnd;

                /* Evenly distribute tests over all valid cache types */
                get_random_bytes((void *)&rnd, sizeof (uint32_t));
                switch (rnd & 0x03) {
                default:
                case 0x00:
                        flags = 0;
                        max_size = (SPL_KMEM_CACHE_MAX_SIZE * 1024 * 1024) / 2;
                        break;
                case 0x01:
                        flags = KMC_KMEM;
                        max_size = (SPL_MAX_ORDER_NR_PAGES - 2) * PAGE_SIZE;
                        break;
                case 0x02:
                        flags = KMC_VMEM;
                        max_size = (SPL_KMEM_CACHE_MAX_SIZE * 1024 * 1024) / 2;
                        break;
                case 0x03:
                        flags = KMC_SLAB;
                        max_size = SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE;
                        break;
                }

                /* The following flags are set with a 1/10 chance */
                flags |= ((((rnd >> 8) % 10) == 0) ? KMC_OFFSLAB : 0);
                flags |= ((((rnd >> 16) % 10) == 0) ? KMC_NOEMERGENCY : 0);

                /* Object size: PAGE_SIZE - max_size */
                get_random_bytes((void *)&rnd, sizeof (uint32_t));
                size = MAX(rnd % (max_size + 1), PAGE_SIZE);

                /* Alignment: 2^N where (3 <= N <= PAGE_SHIFT) */
                get_random_bytes((void *)&rnd, sizeof (uint32_t));
                align = (1 << MAX(3, rnd % (PAGE_SHIFT + 1)));

                rc = splat_kmem_cache_test(file, arg, name, size, align, flags);
                if (rc)
                        return (rc);
        }

        return (rc);
}
/*
|
|
|
|
* Validate object alignment cache behavior for caches
|
|
|
|
*/
|
2009-01-31 04:54:49 +00:00
|
|
|
static int
|
|
|
|
splat_kmem_test7(struct file *file, void *arg)
|
2008-02-26 20:36:04 +00:00
|
|
|
{
|
2009-01-31 04:54:49 +00:00
|
|
|
char *name = SPLAT_KMEM_TEST7_NAME;
|
2014-12-15 22:06:18 +00:00
|
|
|
int max_size = (SPL_KMEM_CACHE_MAX_SIZE * 1024 * 1024) / 2;
|
2009-01-31 04:54:49 +00:00
|
|
|
int i, rc;
|
2008-06-13 23:41:06 +00:00
|
|
|
|
2009-11-13 19:12:43 +00:00
|
|
|
for (i = SPL_KMEM_CACHE_ALIGN; i <= PAGE_SIZE; i *= 2) {
|
2014-12-15 22:06:18 +00:00
|
|
|
uint32_t size;
|
|
|
|
|
|
|
|
get_random_bytes((void *)&size, sizeof (uint32_t));
|
|
|
|
size = MAX(size % (max_size + 1), 32);
|
|
|
|
|
|
|
|
rc = splat_kmem_cache_test(file, arg, name, size, i, 0);
|
2009-01-31 04:54:49 +00:00
|
|
|
if (rc)
|
|
|
|
return rc;
|
2013-07-19 21:39:35 +00:00
|
|
|
|
2014-12-15 22:06:18 +00:00
|
|
|
rc = splat_kmem_cache_test(file, arg, name, size, i,
|
2013-07-19 21:39:35 +00:00
|
|
|
KMC_OFFSLAB);
|
|
|
|
if (rc)
|
|
|
|
return rc;
|
2008-02-26 20:36:04 +00:00
|
|
|
}
|
|
|
|
|
2009-01-31 04:54:49 +00:00
|
|
|
return rc;
|
2008-02-26 20:36:04 +00:00
|
|
|
}
|
|
|
|
|
2012-08-26 20:34:06 +00:00
|
|
|
/*
|
|
|
|
* Validate kmem_cache_reap() by requesting the slab cache free any objects
|
|
|
|
* it can. For a few reasons this may not immediately result in more free
|
|
|
|
* memory even if objects are freed. First off, due to fragmentation we
|
|
|
|
* may not be able to reclaim any slabs. Secondly, even if we do fully
|
|
|
|
* clear some slabs we will not want to immediately reclaim all of them
|
|
|
|
* because we may contend with cache allocations and thrash. What we want
|
|
|
|
* to see is the slab size decrease more gradually as it becomes clear they
|
|
|
|
* will not be needed. This should be achievable in less than a minute.
|
|
|
|
* If it takes longer than this something has gone wrong.
|
|
|
|
*/
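/*
 * The core pattern exercised below, shown in isolation as an
 * illustrative sketch only (the function name and the indirection
 * through a caller-supplied counter are assumptions for illustration):
 * reap the cache repeatedly and poll until the tracked object count
 * drops to zero or the time budget expires.
 */
#if 0
static int
splat_kmem_reap_poll_sketch(kmem_cache_t *skc, unsigned int *count)
{
    int i;

    /* Force reclaim every 1/10 of a second for up to 60 seconds */
    for (i = 0; i < 600; i++) {
        kmem_cache_reap_now(skc);

        if (*count == 0)
            return (0);

        set_current_state(TASK_INTERRUPTIBLE);
        schedule_timeout(HZ / 10);
    }

    return (-ETIME);
}
#endif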
|
2008-02-26 20:36:04 +00:00
|
|
|
static int
|
2009-01-31 04:54:49 +00:00
|
|
|
splat_kmem_test8(struct file *file, void *arg)
|
2008-02-26 20:36:04 +00:00
|
|
|
{
|
2009-01-31 04:54:49 +00:00
|
|
|
kmem_cache_priv_t *kcp;
|
2012-08-26 20:34:06 +00:00
|
|
|
kmem_cache_thread_t *kct;
|
2013-01-18 23:44:27 +00:00
|
|
|
unsigned int spl_kmem_cache_expire_old;
|
2012-04-30 22:37:49 +00:00
|
|
|
int i, rc = 0;
|
2009-01-31 04:54:49 +00:00
|
|
|
|
2013-01-18 23:44:27 +00:00
|
|
|
/* Enable cache aging just for this test if it is disabled */
|
|
|
|
spl_kmem_cache_expire_old = spl_kmem_cache_expire;
|
|
|
|
spl_kmem_cache_expire = KMC_EXPIRE_AGE;
|
|
|
|
|
2009-01-31 04:54:49 +00:00
|
|
|
kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST8_NAME,
|
2012-08-26 20:34:06 +00:00
|
|
|
256, 0, 0);
|
2009-01-31 04:54:49 +00:00
|
|
|
if (!kcp) {
|
|
|
|
splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
|
|
|
|
"Unable to create '%s'\n", "kcp");
|
2012-08-26 20:34:06 +00:00
|
|
|
rc = -ENOMEM;
|
|
|
|
goto out;
|
2008-02-26 20:36:04 +00:00
|
|
|
}
|
|
|
|
|
2009-01-31 04:54:49 +00:00
|
|
|
kcp->kcp_cache =
|
|
|
|
kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
|
|
|
|
splat_kmem_cache_test_constructor,
|
|
|
|
splat_kmem_cache_test_destructor,
|
|
|
|
splat_kmem_cache_test_reclaim,
|
|
|
|
kcp, NULL, 0);
|
|
|
|
if (!kcp->kcp_cache) {
|
|
|
|
splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
|
|
|
|
"Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
|
2012-08-26 20:34:06 +00:00
|
|
|
rc = -ENOMEM;
|
|
|
|
goto out_kcp;
|
2009-01-31 04:54:49 +00:00
|
|
|
}
|
2008-02-26 20:36:04 +00:00
|
|
|
|
2012-08-26 20:34:06 +00:00
|
|
|
kct = splat_kmem_cache_test_kct_alloc(kcp, 0);
|
|
|
|
if (!kct) {
|
|
|
|
splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
|
|
|
|
"Unable to create '%s'\n", "kct");
|
|
|
|
rc = -ENOMEM;
|
|
|
|
goto out_cache;
|
|
|
|
}
|
|
|
|
|
|
|
|
rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, SPLAT_KMEM_OBJ_COUNT);
|
|
|
|
if (rc) {
|
|
|
|
splat_vprint(file, SPLAT_KMEM_TEST8_NAME, "Unable to "
|
|
|
|
"allocate from '%s'\n", SPLAT_KMEM_CACHE_NAME);
|
|
|
|
goto out_kct;
|
2008-02-26 20:36:04 +00:00
|
|
|
}
|
|
|
|
|
2014-04-07 22:40:20 +00:00
|
|
|
/* Force reclaim every 1/10 of a second for 60 seconds. */
|
|
|
|
for (i = 0; i < 600; i++) {
|
2009-01-31 04:54:49 +00:00
|
|
|
kmem_cache_reap_now(kcp->kcp_cache);
|
2012-04-30 22:37:49 +00:00
|
|
|
splat_kmem_cache_test_debug(file, SPLAT_KMEM_TEST8_NAME, kcp);
|
2009-01-31 04:54:49 +00:00
|
|
|
|
2013-12-08 22:01:45 +00:00
|
|
|
if (kcp->kcp_count == 0)
|
2008-06-13 23:41:06 +00:00
|
|
|
break;
|
|
|
|
|
|
|
|
set_current_state(TASK_INTERRUPTIBLE);
|
2014-04-07 22:40:20 +00:00
|
|
|
schedule_timeout(HZ / 10);
|
2008-06-13 23:41:06 +00:00
|
|
|
}
|
|
|
|
|
2013-12-08 22:01:45 +00:00
|
|
|
if (kcp->kcp_count == 0) {
|
2009-01-31 04:54:49 +00:00
|
|
|
splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
|
2008-06-13 23:41:06 +00:00
|
|
|
"Successfully created %d objects "
|
|
|
|
"in cache %s and reclaimed them\n",
|
2009-01-31 04:54:49 +00:00
|
|
|
SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
|
2008-06-13 23:41:06 +00:00
|
|
|
} else {
|
2009-01-31 04:54:49 +00:00
|
|
|
splat_vprint(file, SPLAT_KMEM_TEST8_NAME,
|
2008-06-13 23:41:06 +00:00
|
|
|
"Failed to reclaim %u/%d objects from cache %s\n",
|
2013-12-08 22:01:45 +00:00
|
|
|
(unsigned)kcp->kcp_count,
|
2009-01-31 04:54:49 +00:00
|
|
|
SPLAT_KMEM_OBJ_COUNT, SPLAT_KMEM_CACHE_NAME);
|
2008-06-13 23:41:06 +00:00
|
|
|
rc = -ENOMEM;
|
|
|
|
}
|
2008-02-26 20:36:04 +00:00
|
|
|
|
2008-06-13 23:41:06 +00:00
|
|
|
/* Clean up our mess (for the failure case of time expiring) */
|
2012-08-26 20:34:06 +00:00
|
|
|
splat_kmem_cache_test_kcd_free(kcp, kct);
|
|
|
|
out_kct:
|
|
|
|
splat_kmem_cache_test_kct_free(kcp, kct);
|
|
|
|
out_cache:
|
2009-01-31 04:54:49 +00:00
|
|
|
kmem_cache_destroy(kcp->kcp_cache);
|
2012-08-26 20:34:06 +00:00
|
|
|
out_kcp:
|
2009-01-31 04:54:49 +00:00
|
|
|
splat_kmem_cache_test_kcp_free(kcp);
|
2012-08-26 20:34:06 +00:00
|
|
|
out:
|
2013-01-18 23:44:27 +00:00
|
|
|
spl_kmem_cache_expire = spl_kmem_cache_expire_old;
|
|
|
|
|
2008-02-26 20:36:04 +00:00
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2012-08-26 20:34:06 +00:00
|
|
|
/* Test cache aging: we have allocated a large number of objects thus
|
|
|
|
* creating a large number of slabs and then freed them all. However,
|
|
|
|
* since there should be little memory pressure at the moment those
|
|
|
|
* slabs have not been freed. What we want to see is the slab size
|
|
|
|
* decrease gradually as it becomes clear they will not be needed.
|
|
|
|
* This should be achievable in less than a minute. If it takes longer
|
|
|
|
* than this something has gone wrong.
|
|
|
|
*/
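/*
 * Note: unlike the reap test above, no explicit kmem_cache_reap_now()
 * call is made here.  The test enables age-based expiration
 * (KMC_EXPIRE_AGE) and relies on the cache's periodic aging mechanism,
 * presumably releasing the idle slabs on its own, which is why the loop
 * below simply polls kcp->kcp_count once per second for up to a minute.
 */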
|
2009-01-31 04:54:49 +00:00
|
|
|
static int
|
|
|
|
splat_kmem_test9(struct file *file, void *arg)
|
2008-06-23 23:54:52 +00:00
|
|
|
{
|
2009-01-31 04:54:49 +00:00
|
|
|
kmem_cache_priv_t *kcp;
|
2012-08-26 20:34:06 +00:00
|
|
|
kmem_cache_thread_t *kct;
|
2013-01-18 23:44:27 +00:00
|
|
|
unsigned int spl_kmem_cache_expire_old;
|
2012-04-30 22:37:49 +00:00
|
|
|
int i, rc = 0, count = SPLAT_KMEM_OBJ_COUNT * 128;
|
2009-01-31 04:54:49 +00:00
|
|
|
|
2013-01-18 23:44:27 +00:00
|
|
|
/* Enable cache aging just for this test if it is disabled */
|
|
|
|
spl_kmem_cache_expire_old = spl_kmem_cache_expire;
|
|
|
|
spl_kmem_cache_expire = KMC_EXPIRE_AGE;
|
|
|
|
|
2009-01-31 04:54:49 +00:00
|
|
|
kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST9_NAME,
|
2012-08-26 20:34:06 +00:00
|
|
|
256, 0, 0);
|
2009-01-31 04:54:49 +00:00
|
|
|
if (!kcp) {
|
|
|
|
splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
|
|
|
|
"Unable to create '%s'\n", "kcp");
|
2012-08-26 20:34:06 +00:00
|
|
|
rc = -ENOMEM;
|
|
|
|
goto out;
|
2009-01-31 04:54:49 +00:00
|
|
|
}
|
2008-06-23 23:54:52 +00:00
|
|
|
|
2009-01-31 04:54:49 +00:00
|
|
|
kcp->kcp_cache =
|
|
|
|
kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
|
|
|
|
splat_kmem_cache_test_constructor,
|
|
|
|
splat_kmem_cache_test_destructor,
|
|
|
|
NULL, kcp, NULL, 0);
|
|
|
|
if (!kcp->kcp_cache) {
|
|
|
|
splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
|
|
|
|
"Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
|
2012-08-26 20:34:06 +00:00
|
|
|
rc = -ENOMEM;
|
|
|
|
goto out_kcp;
|
2008-06-23 23:54:52 +00:00
|
|
|
}
|
|
|
|
|
2012-08-26 20:34:06 +00:00
|
|
|
kct = splat_kmem_cache_test_kct_alloc(kcp, 0);
|
|
|
|
if (!kct) {
|
|
|
|
splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
|
|
|
|
"Unable to create '%s'\n", "kct");
|
|
|
|
rc = -ENOMEM;
|
|
|
|
goto out_cache;
|
2008-06-23 23:54:52 +00:00
|
|
|
}
|
|
|
|
|
2012-08-26 20:34:06 +00:00
|
|
|
rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, count);
|
|
|
|
if (rc) {
|
|
|
|
splat_vprint(file, SPLAT_KMEM_TEST9_NAME, "Unable to "
|
|
|
|
"allocate from '%s'\n", SPLAT_KMEM_CACHE_NAME);
|
|
|
|
goto out_kct;
|
|
|
|
}
|
|
|
|
|
|
|
|
splat_kmem_cache_test_kcd_free(kcp, kct);
|
2008-06-26 19:49:42 +00:00
|
|
|
|
2009-01-31 04:54:49 +00:00
|
|
|
for (i = 0; i < 60; i++) {
|
2012-04-30 22:37:49 +00:00
|
|
|
splat_kmem_cache_test_debug(file, SPLAT_KMEM_TEST9_NAME, kcp);
|
2009-01-31 04:54:49 +00:00
|
|
|
|
2013-12-08 22:01:45 +00:00
|
|
|
if (kcp->kcp_count == 0)
|
2009-01-31 04:54:49 +00:00
|
|
|
break;
|
2008-06-23 23:54:52 +00:00
|
|
|
|
2009-01-31 04:54:49 +00:00
|
|
|
set_current_state(TASK_INTERRUPTIBLE);
|
|
|
|
schedule_timeout(HZ);
|
|
|
|
}
|
2008-06-23 23:54:52 +00:00
|
|
|
|
2013-12-08 22:01:45 +00:00
|
|
|
if (kcp->kcp_count == 0) {
|
2009-01-31 04:54:49 +00:00
|
|
|
splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
|
|
|
|
"Successfully created %d objects "
|
|
|
|
"in cache %s and reclaimed them\n",
|
|
|
|
count, SPLAT_KMEM_CACHE_NAME);
|
|
|
|
} else {
|
|
|
|
splat_vprint(file, SPLAT_KMEM_TEST9_NAME,
|
|
|
|
"Failed to reclaim %u/%d objects from cache %s\n",
|
2013-12-08 22:01:45 +00:00
|
|
|
(unsigned)kcp->kcp_count, count,
|
2009-01-31 04:54:49 +00:00
|
|
|
SPLAT_KMEM_CACHE_NAME);
|
|
|
|
rc = -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2012-08-26 20:34:06 +00:00
|
|
|
out_kct:
|
|
|
|
splat_kmem_cache_test_kct_free(kcp, kct);
|
|
|
|
out_cache:
|
2009-01-31 04:54:49 +00:00
|
|
|
kmem_cache_destroy(kcp->kcp_cache);
|
2012-08-26 20:34:06 +00:00
|
|
|
out_kcp:
|
2009-01-31 04:54:49 +00:00
|
|
|
splat_kmem_cache_test_kcp_free(kcp);
|
2012-08-26 20:34:06 +00:00
|
|
|
out:
|
2013-01-18 23:44:27 +00:00
|
|
|
spl_kmem_cache_expire = spl_kmem_cache_expire_old;
|
|
|
|
|
2009-01-31 04:54:49 +00:00
|
|
|
return rc;
|
2008-06-23 23:54:52 +00:00
|
|
|
}
|
|
|
|
|
2009-01-31 04:54:49 +00:00
|
|
|
/*
|
|
|
|
* This test creates N threads with a shared kmem cache. They then all
|
|
|
|
* concurrently allocate and free from the cache to stress the locking and
|
|
|
|
* concurrent cache performance. If any one test takes longer than 5
|
|
|
|
* seconds to complete it is treated as a failure and may indicate a
|
|
|
|
* performance regression. On my test system no one test takes more
|
|
|
|
* than 1 second to complete, so a 5x slowdown is likely a problem.
|
2008-06-23 23:54:52 +00:00
|
|
|
*/
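/*
 * Note on the parameter sweep below: object sizes run from 32 bytes up
 * to maxsize (1 MiB on 64-bit, 128 KiB on 32-bit builds) and per-thread
 * allocation counts from 1 to 1024, both in powers of two.  Any
 * combination whose worst-case footprint (size * alloc *
 * SPLAT_KMEM_THREADS) would exceed half of the smaller of physical
 * memory and the vmalloc arena is skipped; for example, at size = 1 MiB
 * and alloc = 1024 each thread alone could pin 1 GiB, so that
 * combination is skipped on most systems.
 */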
|
|
|
|
static int
|
2009-01-31 04:54:49 +00:00
|
|
|
splat_kmem_test10(struct file *file, void *arg)
|
2008-06-23 23:54:52 +00:00
|
|
|
{
|
2016-10-28 20:56:38 +00:00
|
|
|
uint64_t size, alloc, maxsize, limit, rc = 0;
|
2008-06-23 23:54:52 +00:00
|
|
|
|
2016-10-28 20:56:38 +00:00
|
|
|
#if defined(CONFIG_64BIT)
|
|
|
|
maxsize = (1024 * 1024);
|
|
|
|
#else
|
|
|
|
maxsize = (128 * 1024);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
for (size = 32; size <= maxsize; size *= 2) {
|
2008-06-23 23:54:52 +00:00
|
|
|
|
2009-01-31 04:54:49 +00:00
|
|
|
splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "name",
|
|
|
|
"time (sec)\tslabs \tobjs \thash\n");
|
|
|
|
splat_vprint(file, SPLAT_KMEM_TEST10_NAME, "%-22s %s", "",
|
|
|
|
" \ttot/max/calc\ttot/max/calc\n");
|
2008-06-23 23:54:52 +00:00
|
|
|
|
2009-01-31 04:54:49 +00:00
|
|
|
for (alloc = 1; alloc <= 1024; alloc *= 2) {
|
2008-06-23 23:54:52 +00:00
|
|
|
|
2016-10-28 20:56:38 +00:00
|
|
|
/* Skip tests which exceed 1/2 of memory. */
|
|
|
|
limit = MIN(physmem * PAGE_SIZE,
|
|
|
|
vmem_size(NULL, VMEM_ALLOC | VMEM_FREE)) / 2;
|
|
|
|
if (size * alloc * SPLAT_KMEM_THREADS > limit)
|
2009-01-31 04:54:49 +00:00
|
|
|
continue;
|
2008-11-05 21:43:37 +00:00
|
|
|
|
2009-01-31 04:54:49 +00:00
|
|
|
rc = splat_kmem_cache_thread_test(file, arg,
|
2009-01-31 05:24:42 +00:00
|
|
|
SPLAT_KMEM_TEST10_NAME, size, alloc, 5);
|
2009-01-31 04:54:49 +00:00
|
|
|
if (rc)
|
|
|
|
break;
|
|
|
|
}
|
2008-06-23 23:54:52 +00:00
|
|
|
}
|
|
|
|
|
2008-11-05 21:43:37 +00:00
|
|
|
return rc;
|
2008-06-23 23:54:52 +00:00
|
|
|
}
|
|
|
|
|
2012-11-02 23:13:50 +00:00
|
|
|
#if 0
|
2009-01-31 04:54:49 +00:00
|
|
|
/*
|
|
|
|
* This test creates N threads with a shared kmem cache which overcommits
|
|
|
|
* memory by 4x. This makes it impossible for the slab to satisfy the
|
|
|
|
* thread requirements without having its reclaim hook run which will
|
|
|
|
* free objects back for use. This behavior is triggered by the Linux VM
|
|
|
|
* detecting a low memory condition on the node and invoking the shrinkers.
|
|
|
|
* This should allow all the threads to complete while avoiding deadlock
|
|
|
|
* and, for the most part, out-of-memory events. This is very tough on the
|
2009-12-01 19:40:47 +00:00
|
|
|
* system so it is possible the test app may get oom'ed. This particular
|
|
|
|
* test has proven troublesome on 32-bit archs with limited virtual
|
|
|
|
* address space, so it is only run on 64-bit systems.
|
2009-01-31 04:54:49 +00:00
|
|
|
*/
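/*
 * Worked example of the 4x overcommit computed below: with 2 GiB of
 * physical memory and 8 KiB objects, the threads collectively target
 * (4 * 2 GiB) / 8 KiB = 1,048,576 objects, roughly 8 GiB of cache or 4x
 * physical memory, split evenly across SPLAT_KMEM_THREADS threads.
 * Meeting that demand is only possible if shrinker-driven reclaim keeps
 * freeing objects back to the cache while the threads run.
 */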
|
2008-06-28 05:04:46 +00:00
|
|
|
static int
|
2009-01-31 04:54:49 +00:00
|
|
|
splat_kmem_test11(struct file *file, void *arg)
|
2008-06-28 05:04:46 +00:00
|
|
|
{
|
2009-01-31 04:54:49 +00:00
|
|
|
uint64_t size, alloc, rc;
|
2008-06-28 05:04:46 +00:00
|
|
|
|
2012-08-26 20:34:06 +00:00
|
|
|
size = 8 * 1024;
|
2009-03-17 19:16:31 +00:00
|
|
|
alloc = ((4 * physmem * PAGE_SIZE) / size) / SPLAT_KMEM_THREADS;
|
2008-06-28 05:04:46 +00:00
|
|
|
|
2009-03-17 19:16:31 +00:00
|
|
|
splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "name",
|
2009-01-31 04:54:49 +00:00
|
|
|
"time (sec)\tslabs \tobjs \thash\n");
|
2009-03-17 19:16:31 +00:00
|
|
|
splat_vprint(file, SPLAT_KMEM_TEST11_NAME, "%-22s %s", "",
|
2009-01-31 04:54:49 +00:00
|
|
|
" \ttot/max/calc\ttot/max/calc\n");
|
2009-01-26 17:02:04 +00:00
|
|
|
|
2009-01-31 04:54:49 +00:00
|
|
|
rc = splat_kmem_cache_thread_test(file, arg,
|
2009-01-31 05:24:42 +00:00
|
|
|
SPLAT_KMEM_TEST11_NAME, size, alloc, 60);
|
2009-01-26 17:02:04 +00:00
|
|
|
|
|
|
|
return rc;
|
|
|
|
}
|
2012-11-02 23:13:50 +00:00
|
|
|
#endif
|
2009-01-26 17:02:04 +00:00
|
|
|
|
2012-04-30 22:37:49 +00:00
|
|
|
typedef struct dummy_page {
|
|
|
|
struct list_head dp_list;
|
|
|
|
char dp_pad[PAGE_SIZE - sizeof(struct list_head)];
|
|
|
|
} dummy_page_t;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This test is designed to verify that direct reclaim is functioning as
|
|
|
|
* expected. We allocate a large number of objects thus creating a large
|
|
|
|
* number of slabs. We then apply memory pressure and expect that the
|
|
|
|
* direct reclaim path can easily recover those slabs. The registered
|
|
|
|
* reclaim function will free the objects and the slab shrinker will call
|
|
|
|
* it repeatedly until at least a single slab can be freed.
|
|
|
|
*
|
|
|
|
* Note it may not be possible to reclaim every last slab via direct reclaim
|
|
|
|
* without a failure because the shrinker_rwsem may be contended. For this
|
|
|
|
* reason, quickly reclaiming 3/4 of the slabs is considered a success.
|
|
|
|
*
|
|
|
|
* This should all be possible within 10 seconds. For reference, on a
|
|
|
|
* system with 2G of memory this test takes roughly 0.2 seconds to run.
|
|
|
|
* It may take longer on larger memory systems but should still easily
|
|
|
|
* complete in the allotted 10 seconds.
|
|
|
|
*/
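/*
 * Note on the loop below: "3/4 of the slabs" is expressed as waiting for
 * skc_slab_total to fall to (slabs >> 2), i.e. one quarter of the
 * starting slab count.  Pressure is applied one page at a time with
 * __get_free_page(); the pages are kept on a local list so they can all
 * be released again once the cache has shrunk or the 10 second budget
 * expires.
 */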
|
|
|
|
static int
|
|
|
|
splat_kmem_test13(struct file *file, void *arg)
|
|
|
|
{
|
|
|
|
kmem_cache_priv_t *kcp;
|
2012-08-26 20:34:06 +00:00
|
|
|
kmem_cache_thread_t *kct;
|
2012-04-30 22:37:49 +00:00
|
|
|
dummy_page_t *dp;
|
|
|
|
struct list_head list;
|
2013-08-18 14:51:06 +00:00
|
|
|
struct timespec start, stop, delta = { 0, 0 };
|
2012-04-30 22:37:49 +00:00
|
|
|
int size, count, slabs, fails = 0;
|
|
|
|
int i, rc = 0, max_time = 10;
|
|
|
|
|
|
|
|
size = 128 * 1024;
|
2016-10-28 20:56:38 +00:00
|
|
|
count = MIN(physmem * PAGE_SIZE, vmem_size(NULL,
|
|
|
|
VMEM_ALLOC | VMEM_FREE)) / 4 / size;
|
2012-04-30 22:37:49 +00:00
|
|
|
|
|
|
|
kcp = splat_kmem_cache_test_kcp_alloc(file, SPLAT_KMEM_TEST13_NAME,
|
2012-08-26 20:34:06 +00:00
|
|
|
size, 0, 0);
|
2012-04-30 22:37:49 +00:00
|
|
|
if (!kcp) {
|
|
|
|
splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
|
|
|
|
"Unable to create '%s'\n", "kcp");
|
2012-08-26 20:34:06 +00:00
|
|
|
rc = -ENOMEM;
|
|
|
|
goto out;
|
2012-04-30 22:37:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
kcp->kcp_cache =
|
|
|
|
kmem_cache_create(SPLAT_KMEM_CACHE_NAME, kcp->kcp_size, 0,
|
|
|
|
splat_kmem_cache_test_constructor,
|
|
|
|
splat_kmem_cache_test_destructor,
|
|
|
|
splat_kmem_cache_test_reclaim,
|
|
|
|
kcp, NULL, 0);
|
|
|
|
if (!kcp->kcp_cache) {
|
|
|
|
splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
|
|
|
|
"Unable to create '%s'\n", SPLAT_KMEM_CACHE_NAME);
|
2012-08-26 20:34:06 +00:00
|
|
|
rc = -ENOMEM;
|
|
|
|
goto out_kcp;
|
2012-04-30 22:37:49 +00:00
|
|
|
}
|
|
|
|
|
2012-08-26 20:34:06 +00:00
|
|
|
kct = splat_kmem_cache_test_kct_alloc(kcp, 0);
|
|
|
|
if (!kct) {
|
|
|
|
splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
|
|
|
|
"Unable to create '%s'\n", "kct");
|
|
|
|
rc = -ENOMEM;
|
|
|
|
goto out_cache;
|
|
|
|
}
|
|
|
|
|
|
|
|
rc = splat_kmem_cache_test_kcd_alloc(kcp, kct, count);
|
|
|
|
if (rc) {
|
|
|
|
splat_vprint(file, SPLAT_KMEM_TEST13_NAME, "Unable to "
|
|
|
|
"allocate from '%s'\n", SPLAT_KMEM_CACHE_NAME);
|
|
|
|
goto out_kct;
|
2012-04-30 22:37:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
i = 0;
|
|
|
|
slabs = kcp->kcp_cache->skc_slab_total;
|
|
|
|
INIT_LIST_HEAD(&list);
|
2013-08-18 14:51:06 +00:00
|
|
|
getnstimeofday(&start);
|
2012-04-30 22:37:49 +00:00
|
|
|
|
2012-08-26 20:34:06 +00:00
|
|
|
/* Apply memory pressure */
|
2012-04-30 22:37:49 +00:00
|
|
|
while (kcp->kcp_cache->skc_slab_total > (slabs >> 2)) {
|
|
|
|
|
|
|
|
if ((i % 10000) == 0)
|
|
|
|
splat_kmem_cache_test_debug(
|
|
|
|
file, SPLAT_KMEM_TEST13_NAME, kcp);
|
|
|
|
|
2013-08-18 14:51:06 +00:00
|
|
|
getnstimeofday(&stop);
|
|
|
|
delta = timespec_sub(stop, start);
|
2012-04-30 22:37:49 +00:00
|
|
|
if (delta.tv_sec >= max_time) {
|
|
|
|
splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
|
|
|
|
"Failed to reclaim 3/4 of cache in %ds, "
|
|
|
|
"%u/%u slabs remain\n", max_time,
|
|
|
|
(unsigned)kcp->kcp_cache->skc_slab_total,
|
|
|
|
slabs);
|
|
|
|
rc = -ETIME;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2014-04-07 22:40:20 +00:00
|
|
|
dp = (dummy_page_t *)__get_free_page(GFP_KERNEL);
|
2012-04-30 22:37:49 +00:00
|
|
|
if (!dp) {
|
|
|
|
fails++;
|
|
|
|
splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
|
|
|
|
"Failed (%d) to allocate page with %u "
|
|
|
|
"slabs still in the cache\n", fails,
|
|
|
|
(unsigned)kcp->kcp_cache->skc_slab_total);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
list_add(&dp->dp_list, &list);
|
|
|
|
i++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (rc == 0)
|
|
|
|
splat_vprint(file, SPLAT_KMEM_TEST13_NAME,
|
|
|
|
"Successfully created %u slabs and with %d alloc "
|
|
|
|
"failures reclaimed 3/4 of them in %d.%03ds\n",
|
|
|
|
slabs, fails,
|
|
|
|
(int)delta.tv_sec, (int)delta.tv_nsec / 1000000);
|
|
|
|
|
|
|
|
/* Release memory pressure pages */
|
|
|
|
while (!list_empty(&list)) {
|
|
|
|
dp = list_entry(list.next, dummy_page_t, dp_list);
|
|
|
|
list_del_init(&dp->dp_list);
|
|
|
|
free_page((unsigned long)dp);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Release remaining kmem cache objects */
|
2012-08-26 20:34:06 +00:00
|
|
|
splat_kmem_cache_test_kcd_free(kcp, kct);
|
|
|
|
out_kct:
|
|
|
|
splat_kmem_cache_test_kct_free(kcp, kct);
|
|
|
|
out_cache:
|
2012-04-30 22:37:49 +00:00
|
|
|
kmem_cache_destroy(kcp->kcp_cache);
|
2012-08-26 20:34:06 +00:00
|
|
|
out_kcp:
|
2012-04-30 22:37:49 +00:00
|
|
|
splat_kmem_cache_test_kcp_free(kcp);
|
2012-08-26 20:34:06 +00:00
|
|
|
out:
|
2012-04-30 22:37:49 +00:00
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
|
2008-02-27 23:42:31 +00:00
|
|
|
splat_subsystem_t *
|
|
|
|
splat_kmem_init(void)
|
2008-02-26 20:36:04 +00:00
|
|
|
{
|
2009-01-31 04:54:49 +00:00
|
|
|
splat_subsystem_t *sub;
|
2008-02-26 20:36:04 +00:00
|
|
|
|
2009-01-31 04:54:49 +00:00
|
|
|
sub = kmalloc(sizeof(*sub), GFP_KERNEL);
|
|
|
|
if (sub == NULL)
|
|
|
|
return NULL;
|
2008-02-26 20:36:04 +00:00
|
|
|
|
2009-01-31 04:54:49 +00:00
|
|
|
memset(sub, 0, sizeof(*sub));
|
|
|
|
strncpy(sub->desc.name, SPLAT_KMEM_NAME, SPLAT_NAME_SIZE);
|
2008-02-27 23:42:31 +00:00
|
|
|
strncpy(sub->desc.desc, SPLAT_KMEM_DESC, SPLAT_DESC_SIZE);
|
2009-01-31 04:54:49 +00:00
|
|
|
INIT_LIST_HEAD(&sub->subsystem_list);
|
2008-02-26 20:36:04 +00:00
|
|
|
INIT_LIST_HEAD(&sub->test_list);
|
2009-01-31 04:54:49 +00:00
|
|
|
spin_lock_init(&sub->test_lock);
|
|
|
|
sub->desc.id = SPLAT_SUBSYSTEM_KMEM;
|
|
|
|
|
2016-12-15 02:24:47 +00:00
|
|
|
splat_test_init(sub, SPLAT_KMEM_TEST1_NAME, SPLAT_KMEM_TEST1_DESC,
|
2009-01-31 04:54:49 +00:00
|
|
|
SPLAT_KMEM_TEST1_ID, splat_kmem_test1);
|
2016-12-15 02:24:47 +00:00
|
|
|
splat_test_init(sub, SPLAT_KMEM_TEST2_NAME, SPLAT_KMEM_TEST2_DESC,
|
2009-01-31 04:54:49 +00:00
|
|
|
SPLAT_KMEM_TEST2_ID, splat_kmem_test2);
|
2016-12-15 02:24:47 +00:00
|
|
|
splat_test_init(sub, SPLAT_KMEM_TEST3_NAME, SPLAT_KMEM_TEST3_DESC,
|
2009-01-31 04:54:49 +00:00
|
|
|
SPLAT_KMEM_TEST3_ID, splat_kmem_test3);
|
2016-12-15 02:24:47 +00:00
|
|
|
splat_test_init(sub, SPLAT_KMEM_TEST4_NAME, SPLAT_KMEM_TEST4_DESC,
|
2009-01-31 04:54:49 +00:00
|
|
|
SPLAT_KMEM_TEST4_ID, splat_kmem_test4);
|
2016-12-15 02:24:47 +00:00
|
|
|
splat_test_init(sub, SPLAT_KMEM_TEST5_NAME, SPLAT_KMEM_TEST5_DESC,
|
2009-01-31 04:54:49 +00:00
|
|
|
SPLAT_KMEM_TEST5_ID, splat_kmem_test5);
|
2016-12-15 02:24:47 +00:00
|
|
|
splat_test_init(sub, SPLAT_KMEM_TEST6_NAME, SPLAT_KMEM_TEST6_DESC,
|
2009-01-31 04:54:49 +00:00
|
|
|
SPLAT_KMEM_TEST6_ID, splat_kmem_test6);
|
2016-12-15 02:24:47 +00:00
|
|
|
splat_test_init(sub, SPLAT_KMEM_TEST7_NAME, SPLAT_KMEM_TEST7_DESC,
|
2009-01-31 04:54:49 +00:00
|
|
|
SPLAT_KMEM_TEST7_ID, splat_kmem_test7);
|
2016-12-15 02:24:47 +00:00
|
|
|
splat_test_init(sub, SPLAT_KMEM_TEST8_NAME, SPLAT_KMEM_TEST8_DESC,
|
2009-01-31 04:54:49 +00:00
|
|
|
SPLAT_KMEM_TEST8_ID, splat_kmem_test8);
|
2016-12-15 02:24:47 +00:00
|
|
|
splat_test_init(sub, SPLAT_KMEM_TEST9_NAME, SPLAT_KMEM_TEST9_DESC,
|
2009-01-31 04:54:49 +00:00
|
|
|
SPLAT_KMEM_TEST9_ID, splat_kmem_test9);
|
2016-12-15 02:24:47 +00:00
|
|
|
splat_test_init(sub, SPLAT_KMEM_TEST10_NAME, SPLAT_KMEM_TEST10_DESC,
|
2009-01-31 04:54:49 +00:00
|
|
|
SPLAT_KMEM_TEST10_ID, splat_kmem_test10);
|
2012-11-02 23:13:50 +00:00
|
|
|
#if 0
|
2016-12-15 02:24:47 +00:00
|
|
|
splat_test_init(sub, SPLAT_KMEM_TEST11_NAME, SPLAT_KMEM_TEST11_DESC,
|
2009-01-31 04:54:49 +00:00
|
|
|
SPLAT_KMEM_TEST11_ID, splat_kmem_test11);
|
2012-11-02 23:13:50 +00:00
|
|
|
#endif
|
2016-12-15 02:24:47 +00:00
|
|
|
splat_test_init(sub, SPLAT_KMEM_TEST13_NAME, SPLAT_KMEM_TEST13_DESC,
|
2012-04-30 22:37:49 +00:00
|
|
|
SPLAT_KMEM_TEST13_ID, splat_kmem_test13);
|
2009-01-31 04:54:49 +00:00
|
|
|
|
|
|
|
return sub;
|
2008-02-26 20:36:04 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2008-02-27 23:42:31 +00:00
|
|
|
splat_kmem_fini(splat_subsystem_t *sub)
|
2008-02-26 20:36:04 +00:00
|
|
|
{
|
2009-01-31 04:54:49 +00:00
|
|
|
ASSERT(sub);
|
2016-12-15 02:24:47 +00:00
|
|
|
splat_test_fini(sub, SPLAT_KMEM_TEST13_ID);
|
2012-11-02 23:13:50 +00:00
|
|
|
#if 0
|
2016-12-15 02:24:47 +00:00
|
|
|
splat_test_fini(sub, SPLAT_KMEM_TEST11_ID);
|
2012-11-02 23:13:50 +00:00
|
|
|
#endif
|
2016-12-15 02:24:47 +00:00
|
|
|
splat_test_fini(sub, SPLAT_KMEM_TEST10_ID);
|
|
|
|
splat_test_fini(sub, SPLAT_KMEM_TEST9_ID);
|
|
|
|
splat_test_fini(sub, SPLAT_KMEM_TEST8_ID);
|
|
|
|
splat_test_fini(sub, SPLAT_KMEM_TEST7_ID);
|
|
|
|
splat_test_fini(sub, SPLAT_KMEM_TEST6_ID);
|
|
|
|
splat_test_fini(sub, SPLAT_KMEM_TEST5_ID);
|
|
|
|
splat_test_fini(sub, SPLAT_KMEM_TEST4_ID);
|
|
|
|
splat_test_fini(sub, SPLAT_KMEM_TEST3_ID);
|
|
|
|
splat_test_fini(sub, SPLAT_KMEM_TEST2_ID);
|
|
|
|
splat_test_fini(sub, SPLAT_KMEM_TEST1_ID);
|
2009-01-31 04:54:49 +00:00
|
|
|
|
|
|
|
kfree(sub);
|
2008-02-26 20:36:04 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2008-02-27 23:42:31 +00:00
|
|
|
splat_kmem_id(void) {
|
2009-01-31 04:54:49 +00:00
|
|
|
return SPLAT_SUBSYSTEM_KMEM;
|
2008-02-26 20:36:04 +00:00
|
|
|
}
|