Remove global memory variables

Platforms such as Illumos and FreeBSD have historically provided
global variables which summarize the memory state of a system.
Linux, on the other hand, does not expose this information to
kernel modules and uses entirely different mechanisms for
memory management.

In order to simplify the original ZFS port to Linux, these global
variables were emulated by the SPL for the benefit of ZFS.  As ZoL
has matured over the years it has moved steadily away from these
interfaces and now no longer depends on them at all.

Therefore, this patch completely removes the global variables
availrmem, minfree, desfree, lotsfree, needfree, swapfs_minfree,
and swapfs_reserve.  This greatly simplifies the memory management
code and eliminates a common area of confusion.

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Brian Behlendorf 2014-10-01 18:34:41 -04:00
parent e1310afae3
commit 8bbbe46f86
6 changed files with 7 additions and 747 deletions
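
Nothing in this commit introduces a replacement interface; consumers are
expected to read the Linux counters directly.  One of the header hunks
below still maps physmem to totalram_pages and freemem to
nr_free_pages(), so a minimal sketch of a memory-pressure check built
only on those native counters might look like the following.  The helper
name spl_low_memory() and the 1/32 threshold are illustrative
assumptions, not part of this commit or of the SPL.

#include <linux/mm.h>		/* totalram_pages */
#include <linux/swap.h>		/* nr_free_pages() */

/* Hypothetical helper: non-zero when free memory drops below roughly
 * 1/32 of total RAM.  The threshold is chosen only for illustration. */
static int
spl_low_memory(void)
{
	unsigned long total = totalram_pages;	/* physmem in SPL terms */
	unsigned long free = nr_free_pages();	/* freemem in SPL terms */

	return (free < (total >> 5));
}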


@@ -29,16 +29,6 @@ AC_DEFUN([SPL_AC_CONFIG_KERNEL], [
SPL_AC_MUTEX_OWNER
SPL_AC_MUTEX_OWNER_TASK_STRUCT
SPL_AC_KALLSYMS_LOOKUP_NAME
SPL_AC_PGDAT_HELPERS
SPL_AC_FIRST_ONLINE_PGDAT
SPL_AC_NEXT_ONLINE_PGDAT
SPL_AC_NEXT_ZONE
SPL_AC_PGDAT_LIST
SPL_AC_GLOBAL_PAGE_STATE
SPL_AC_ZONE_STAT_ITEM_FREE
SPL_AC_ZONE_STAT_ITEM_INACTIVE
SPL_AC_ZONE_STAT_ITEM_ACTIVE
SPL_AC_GET_ZONE_COUNTS
SPL_AC_USER_PATH_DIR
SPL_AC_SET_FS_PWD
SPL_AC_SET_FS_PWD_WITH_CONST
@@ -1006,317 +996,6 @@ AC_DEFUN([SPL_AC_PDE_DATA], [
])
])
dnl #
dnl # 2.6.17 API change
dnl # The helper functions first_online_pgdat(), next_online_pgdat(), and
dnl # next_zone() are introduced to simplify for_each_zone(). These symbols
dnl # were exported in 2.6.17 for use by modules which was consistent with
dnl # the previous implementation of for_each_zone(). From 2.6.18 - 2.6.19
dnl # the symbols were exported as 'unused', and by 2.6.20 the exports
dnl # were dropped entirely, leaving modules no way to directly iterate over
dnl # the zone list. Because we need access to the zone helpers we check
dnl # if the kernel contains the old or new implementation. Then we check
dnl # to see if the symbols we need for each version are available. If they
dnl # are not, dynamically acquire the addresses with kallsyms_lookup_name().
dnl #
AC_DEFUN([SPL_AC_PGDAT_HELPERS], [
AC_MSG_CHECKING([whether symbol *_pgdat exist])
grep -q -E 'first_online_pgdat' $LINUX/include/linux/mmzone.h 2>/dev/null
rc=$?
if test $rc -eq 0; then
AC_MSG_RESULT([yes])
AC_DEFINE(HAVE_PGDAT_HELPERS, 1, [pgdat helpers are available])
else
AC_MSG_RESULT([no])
fi
])
dnl #
dnl # Proposed API change,
dnl # This symbol is not available in stock kernels. You may build a
dnl # custom kernel with the *-spl-export-symbols.patch which will export
dnl # these symbols for use. If you're already rolling a custom kernel for
dnl # your environment this is recommended.
dnl #
AC_DEFUN([SPL_AC_FIRST_ONLINE_PGDAT],
[AC_MSG_CHECKING([whether first_online_pgdat() is available])
SPL_LINUX_TRY_COMPILE_SYMBOL([
#include <linux/mmzone.h>
], [
first_online_pgdat();
], [first_online_pgdat], [], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_FIRST_ONLINE_PGDAT, 1,
[first_online_pgdat() is available])
], [
AC_MSG_RESULT(no)
])
])
dnl #
dnl # Proposed API change,
dnl # This symbol is not available in stock kernels. You may build a
dnl # custom kernel with the *-spl-export-symbols.patch which will export
dnl # these symbols for use. If you're already rolling a custom kernel for
dnl # your environment this is recommended.
dnl #
AC_DEFUN([SPL_AC_NEXT_ONLINE_PGDAT],
[AC_MSG_CHECKING([whether next_online_pgdat() is available])
SPL_LINUX_TRY_COMPILE_SYMBOL([
#include <linux/mmzone.h>
], [
next_online_pgdat(NULL);
], [next_online_pgdat], [], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_NEXT_ONLINE_PGDAT, 1,
[next_online_pgdat() is available])
], [
AC_MSG_RESULT(no)
])
])
dnl #
dnl # Proposed API change,
dnl # This symbol is not available in stock kernels. You may build a
dnl # custom kernel with the *-spl-export-symbols.patch which will export
dnl # these symbols for use. If you're already rolling a custom kernel for
dnl # your environment this is recommended.
dnl #
AC_DEFUN([SPL_AC_NEXT_ZONE],
[AC_MSG_CHECKING([whether next_zone() is available])
SPL_LINUX_TRY_COMPILE_SYMBOL([
#include <linux/mmzone.h>
], [
next_zone(NULL);
], [next_zone], [], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_NEXT_ZONE, 1, [next_zone() is available])
], [
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 2.6.17 API change,
dnl # See SPL_AC_PGDAT_HELPERS for details.
dnl #
AC_DEFUN([SPL_AC_PGDAT_LIST],
[AC_MSG_CHECKING([whether pgdat_list is available])
SPL_LINUX_TRY_COMPILE_SYMBOL([
#include <linux/topology.h>
pg_data_t *tmp = pgdat_list;
], [], [pgdat_list], [], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_PGDAT_LIST, 1, [pgdat_list is available])
], [
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 2.6.18 API change,
dnl # First introduced global_page_state() support as an inline.
dnl #
AC_DEFUN([SPL_AC_GLOBAL_PAGE_STATE], [
AC_MSG_CHECKING([whether global_page_state() is available])
SPL_LINUX_TRY_COMPILE([
#include <linux/mm.h>
],[
unsigned long state __attribute__ ((unused));
state = global_page_state(0);
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GLOBAL_PAGE_STATE, 1,
[global_page_state() is available])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 2.6.21 API change (plus subsequent naming convention changes),
dnl # Public global zone stats now include a free page count. However
dnl # the enumerated names of the counters have changed since this API
dnl # was introduced. We need to deduce the correct name to use. This
dnl # replaces the private get_zone_counts() interface.
dnl #
dnl # NR_FREE_PAGES was available from 2.6.21 to current kernels, which
dnl # is 2.6.30 as of when this was written.
dnl #
AC_DEFUN([SPL_AC_ZONE_STAT_ITEM_FREE], [
AC_MSG_CHECKING([whether page state NR_FREE_PAGES is available])
SPL_LINUX_TRY_COMPILE([
#include <linux/mm.h>
],[
enum zone_stat_item zsi __attribute__ ((unused));
zsi = NR_FREE_PAGES;
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_ZONE_STAT_ITEM_NR_FREE_PAGES, 1,
[Page state NR_FREE_PAGES is available])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 2.6.21 API change (plus subsequent naming convention changes),
dnl # Public global zone stats now include an inactive page count. However
dnl # the enumerated names of the counters have changed since this API
dnl # was introduced. We need to deduce the correct name to use. This
dnl # replaces the private get_zone_counts() interface.
dnl #
dnl # NR_INACTIVE was available from 2.6.21 to 2.6.27 and included both
dnl # anonymous and file inactive pages. As of 2.6.28 it was split into
dnl # NR_INACTIVE_ANON and NR_INACTIVE_FILE.
dnl #
AC_DEFUN([SPL_AC_ZONE_STAT_ITEM_INACTIVE], [
AC_MSG_CHECKING([whether page state NR_INACTIVE is available])
SPL_LINUX_TRY_COMPILE([
#include <linux/mm.h>
],[
enum zone_stat_item zsi __attribute__ ((unused));
zsi = NR_INACTIVE;
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_ZONE_STAT_ITEM_NR_INACTIVE, 1,
[Page state NR_INACTIVE is available])
],[
AC_MSG_RESULT(no)
])
AC_MSG_CHECKING([whether page state NR_INACTIVE_ANON is available])
SPL_LINUX_TRY_COMPILE([
#include <linux/mm.h>
],[
enum zone_stat_item zsi __attribute__ ((unused));
zsi = NR_INACTIVE_ANON;
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_ZONE_STAT_ITEM_NR_INACTIVE_ANON, 1,
[Page state NR_INACTIVE_ANON is available])
],[
AC_MSG_RESULT(no)
])
AC_MSG_CHECKING([whether page state NR_INACTIVE_FILE is available])
SPL_LINUX_TRY_COMPILE([
#include <linux/mm.h>
],[
enum zone_stat_item zsi __attribute__ ((unused));
zsi = NR_INACTIVE_FILE;
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_ZONE_STAT_ITEM_NR_INACTIVE_FILE, 1,
[Page state NR_INACTIVE_FILE is available])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 2.6.21 API change (plus subsequent naming convention changes),
dnl # Public global zone stats now include an active page count. However
dnl # the enumerated names of the counters have changed since this API
dnl # was introduced. We need to deduce the correct name to use. This
dnl # replaces the private get_zone_counts() interface.
dnl #
dnl # NR_ACTIVE was available from 2.6.21 to 2.6.27 and included both
dnl # anonymous and file active pages. As of 2.6.28 it was split into
dnl # NR_ACTIVE_ANON and NR_ACTIVE_FILE.
dnl #
AC_DEFUN([SPL_AC_ZONE_STAT_ITEM_ACTIVE], [
AC_MSG_CHECKING([whether page state NR_ACTIVE is available])
SPL_LINUX_TRY_COMPILE([
#include <linux/mm.h>
],[
enum zone_stat_item zsi __attribute__ ((unused));
zsi = NR_ACTIVE;
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_ZONE_STAT_ITEM_NR_ACTIVE, 1,
[Page state NR_ACTIVE is available])
],[
AC_MSG_RESULT(no)
])
AC_MSG_CHECKING([whether page state NR_ACTIVE_ANON is available])
SPL_LINUX_TRY_COMPILE([
#include <linux/mm.h>
],[
enum zone_stat_item zsi __attribute__ ((unused));
zsi = NR_ACTIVE_ANON;
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_ZONE_STAT_ITEM_NR_ACTIVE_ANON, 1,
[Page state NR_ACTIVE_ANON is available])
],[
AC_MSG_RESULT(no)
])
AC_MSG_CHECKING([whether page state NR_ACTIVE_FILE is available])
SPL_LINUX_TRY_COMPILE([
#include <linux/mm.h>
],[
enum zone_stat_item zsi __attribute__ ((unused));
zsi = NR_ACTIVE_FILE;
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_ZONE_STAT_ITEM_NR_ACTIVE_FILE, 1,
[Page state NR_ACTIVE_FILE is available])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # Proposed API change for legacy kernels.
dnl # This symbol is not available in older kernels. For kernels post
dnl # 2.6.21 the global_page_state() API is used to get free/inactive/active
dnl # page state information. This symbol is only used in legacy kernels
dnl # and only as a last resort.
dnl #
AC_DEFUN([SPL_AC_GET_ZONE_COUNTS], [
AC_MSG_CHECKING([whether symbol get_zone_counts is needed])
SPL_LINUX_TRY_COMPILE([
],[
#if !defined(HAVE_ZONE_STAT_ITEM_NR_FREE_PAGES)
#error "global_page_state needs NR_FREE_PAGES"
#endif
#if !defined(HAVE_ZONE_STAT_ITEM_NR_ACTIVE) && \
!defined(HAVE_ZONE_STAT_ITEM_NR_ACTIVE_ANON) && \
!defined(HAVE_ZONE_STAT_ITEM_NR_ACTIVE_FILE)
#error "global_page_state needs NR_ACTIVE*"
#endif
#if !defined(HAVE_ZONE_STAT_ITEM_NR_INACTIVE) && \
!defined(HAVE_ZONE_STAT_ITEM_NR_INACTIVE_ANON) && \
!defined(HAVE_ZONE_STAT_ITEM_NR_INACTIVE_FILE)
#error "global_page_state needs NR_INACTIVE*"
#endif
],[
AC_MSG_RESULT(no)
],[
AC_MSG_RESULT(yes)
AC_DEFINE(NEED_GET_ZONE_COUNTS, 1,
[get_zone_counts() is needed])
AC_MSG_CHECKING([whether get_zone_counts() is available])
SPL_LINUX_TRY_COMPILE_SYMBOL([
#include <linux/mmzone.h>
], [
get_zone_counts(NULL, NULL, NULL);
], [get_zone_counts], [], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_GET_ZONE_COUNTS, 1,
[get_zone_counts() is available])
], [
AC_MSG_RESULT(no)
])
])
])
dnl #
dnl # 2.6.27 API change,
dnl # The user_path_dir() replaces __user_walk()
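
The SPL_AC_PGDAT_HELPERS comment above describes falling back to
kallsyms_lookup_name() when the zone helpers are not exported.  A
condensed, illustrative sketch of that pattern follows; it is not part
of this commit, and resolve_pgdat_helpers() is a made-up name (the
SPL's real code appears in the spl_kmem_init_kallsyms_lookup() hunk
further down).

#include <linux/errno.h>
#include <linux/kallsyms.h>
#include <linux/mmzone.h>

/* Resolve first_online_pgdat() by address when it is not exported and
 * call it through a function pointer from then on. */
typedef struct pglist_data *(*first_online_pgdat_t)(void);
static first_online_pgdat_t first_online_pgdat_fn;

static int
resolve_pgdat_helpers(void)
{
	first_online_pgdat_fn = (first_online_pgdat_t)
	    kallsyms_lookup_name("first_online_pgdat");
	if (!first_online_pgdat_fn)
		return (-EFAULT);	/* symbol missing in this kernel */

	return (0);
}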


@@ -28,22 +28,6 @@
#include <linux/mm.h>
#include <linux/fs.h>
/*
* Linux 2.6.31 API Change.
* Individual pages_{min,low,high} moved into the watermark array.
*/
#ifndef min_wmark_pages
#define min_wmark_pages(z) (z->pages_min)
#endif
#ifndef low_wmark_pages
#define low_wmark_pages(z) (z->pages_low)
#endif
#ifndef high_wmark_pages
#define high_wmark_pages(z) (z->pages_high)
#endif
#if !defined(HAVE_SHRINK_CONTROL_STRUCT)
struct shrink_control {
gfp_t gfp_mask;


@@ -33,31 +33,14 @@
#include <sys/types.h>
#include <asm/uaccess.h>
/* These values are loosely coupled with the Linux VM page reclaim
* heuristics, which are difficult to interface with directly. They
* should only be considered a rough guide to the system memory state
* and not as direct evidence that page reclamation is or is not
* currently in progress.
*/
#define membar_producer() smp_wmb()
#define physmem totalram_pages
#define freemem nr_free_pages()
#define availrmem spl_kmem_availrmem()
extern pgcnt_t minfree; /* Sum of zone->pages_min */
extern pgcnt_t desfree; /* Sum of zone->pages_low */
extern pgcnt_t lotsfree; /* Sum of zone->pages_high */
extern pgcnt_t needfree; /* Always 0 unused in new Solaris */
extern pgcnt_t swapfs_minfree; /* Solaris default value */
extern pgcnt_t swapfs_reserve; /* Solaris default value */
#define membar_producer() smp_wmb()
#define physmem totalram_pages
#define freemem nr_free_pages()
extern vmem_t *heap_arena; /* primary kernel heap arena */
extern vmem_t *zio_alloc_arena; /* arena for zio caches */
extern vmem_t *zio_arena; /* arena for allocating zio memory */
extern pgcnt_t spl_kmem_availrmem(void);
extern size_t vmem_size(vmem_t *vmp, int typemask);
#define VMEM_ALLOC 0x01
@@ -67,54 +50,8 @@ extern size_t vmem_size(vmem_t *vmp, int typemask);
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#endif
#ifdef HAVE_PGDAT_HELPERS
/* Source linux/mm/mmzone.c */
# ifndef HAVE_FIRST_ONLINE_PGDAT
typedef struct pglist_data *(*first_online_pgdat_t)(void);
extern first_online_pgdat_t first_online_pgdat_fn;
# define first_online_pgdat() first_online_pgdat_fn()
# endif /* HAVE_FIRST_ONLINE_PGDAT */
# ifndef HAVE_NEXT_ONLINE_PGDAT
typedef struct pglist_data *(*next_online_pgdat_t)(struct pglist_data *);
extern next_online_pgdat_t next_online_pgdat_fn;
# define next_online_pgdat(pgd) next_online_pgdat_fn(pgd)
# endif /* HAVE_NEXT_ONLINE_PGDAT */
# ifndef HAVE_NEXT_ZONE
typedef struct zone *(*next_zone_t)(struct zone *);
extern next_zone_t next_zone_fn;
# define next_zone(zone) next_zone_fn(zone)
# endif /* HAVE_NEXT_ZONE */
#else /* HAVE_PGDAT_HELPERS */
# ifndef HAVE_PGDAT_LIST
extern struct pglist_data *pgdat_list_addr;
# define pgdat_list pgdat_list_addr
# endif /* HAVE_PGDAT_LIST */
#endif /* HAVE_PGDAT_HELPERS */
/* Source linux/mm/vmstat.c */
#if defined(NEED_GET_ZONE_COUNTS) && !defined(HAVE_GET_ZONE_COUNTS)
typedef void (*get_zone_counts_t)(unsigned long *, unsigned long *,
unsigned long *);
extern get_zone_counts_t get_zone_counts_fn;
# define get_zone_counts(a,i,f) get_zone_counts_fn(a,i,f)
#endif /* NEED_GET_ZONE_COUNTS && !HAVE_GET_ZONE_COUNTS */
typedef enum spl_zone_stat_item {
SPL_NR_FREE_PAGES,
SPL_NR_INACTIVE,
SPL_NR_ACTIVE,
SPL_NR_ZONE_STAT_ITEMS
} spl_zone_stat_item_t;
extern unsigned long spl_global_page_state(spl_zone_stat_item_t);
#define xcopyin(from, to, size) copy_from_user(to, from, size)
#define xcopyout(from, to, size) copy_to_user(to, from, size)
#define xcopyin(from, to, size) copy_from_user(to, from, size)
#define xcopyout(from, to, size) copy_to_user(to, from, size)
static __inline__ int
copyin(const void *from, void *to, size_t len)


@@ -101,44 +101,6 @@ module_param(spl_kmem_cache_kmem_limit, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_kmem_limit,
"Objects less than N bytes use the kmalloc");
/*
* The minimum amount of memory measured in pages to be free at all
* times on the system. This is similar to Linux's zone->pages_min
* multiplied by the number of zones and is sized based on that.
*/
pgcnt_t minfree = 0;
EXPORT_SYMBOL(minfree);
/*
* The desired amount of memory measured in pages to be free at all
* times on the system. This is similar to Linux's zone->pages_low
* multiplied by the number of zones and is sized based on that.
* Assuming all zones are being used roughly equally, when we drop
* below this threshold asynchronous page reclamation is triggered.
*/
pgcnt_t desfree = 0;
EXPORT_SYMBOL(desfree);
/*
* When above this amount of memory, measured in pages, the system is
* determined to have enough free memory. This is similar to Linux's
* zone->pages_high multiplied by the number of zones and is sized based
* on that. Assuming all zones are being used roughly equally, when
* asynchronous page reclamation reaches this threshold it stops.
*/
pgcnt_t lotsfree = 0;
EXPORT_SYMBOL(lotsfree);
/* Unused always 0 in this implementation */
pgcnt_t needfree = 0;
EXPORT_SYMBOL(needfree);
pgcnt_t swapfs_minfree = 0;
EXPORT_SYMBOL(swapfs_minfree);
pgcnt_t swapfs_reserve = 0;
EXPORT_SYMBOL(swapfs_reserve);
vmem_t *heap_arena = NULL;
EXPORT_SYMBOL(heap_arena);
@@ -148,101 +110,6 @@ EXPORT_SYMBOL(zio_alloc_arena);
vmem_t *zio_arena = NULL;
EXPORT_SYMBOL(zio_arena);
#ifdef HAVE_PGDAT_HELPERS
# ifndef HAVE_FIRST_ONLINE_PGDAT
first_online_pgdat_t first_online_pgdat_fn = SYMBOL_POISON;
EXPORT_SYMBOL(first_online_pgdat_fn);
# endif /* HAVE_FIRST_ONLINE_PGDAT */
# ifndef HAVE_NEXT_ONLINE_PGDAT
next_online_pgdat_t next_online_pgdat_fn = SYMBOL_POISON;
EXPORT_SYMBOL(next_online_pgdat_fn);
# endif /* HAVE_NEXT_ONLINE_PGDAT */
# ifndef HAVE_NEXT_ZONE
next_zone_t next_zone_fn = SYMBOL_POISON;
EXPORT_SYMBOL(next_zone_fn);
# endif /* HAVE_NEXT_ZONE */
#else /* HAVE_PGDAT_HELPERS */
# ifndef HAVE_PGDAT_LIST
struct pglist_data *pgdat_list_addr = SYMBOL_POISON;
EXPORT_SYMBOL(pgdat_list_addr);
# endif /* HAVE_PGDAT_LIST */
#endif /* HAVE_PGDAT_HELPERS */
#ifdef NEED_GET_ZONE_COUNTS
# ifndef HAVE_GET_ZONE_COUNTS
get_zone_counts_t get_zone_counts_fn = SYMBOL_POISON;
EXPORT_SYMBOL(get_zone_counts_fn);
# endif /* HAVE_GET_ZONE_COUNTS */
unsigned long
spl_global_page_state(spl_zone_stat_item_t item)
{
unsigned long active;
unsigned long inactive;
unsigned long free;
get_zone_counts(&active, &inactive, &free);
switch (item) {
case SPL_NR_FREE_PAGES: return free;
case SPL_NR_INACTIVE: return inactive;
case SPL_NR_ACTIVE: return active;
default: ASSERT(0); /* Unsupported */
}
return 0;
}
#else
# ifdef HAVE_GLOBAL_PAGE_STATE
unsigned long
spl_global_page_state(spl_zone_stat_item_t item)
{
unsigned long pages = 0;
switch (item) {
case SPL_NR_FREE_PAGES:
# ifdef HAVE_ZONE_STAT_ITEM_NR_FREE_PAGES
pages += global_page_state(NR_FREE_PAGES);
# endif
break;
case SPL_NR_INACTIVE:
# ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE
pages += global_page_state(NR_INACTIVE);
# endif
# ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE_ANON
pages += global_page_state(NR_INACTIVE_ANON);
# endif
# ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE_FILE
pages += global_page_state(NR_INACTIVE_FILE);
# endif
break;
case SPL_NR_ACTIVE:
# ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE
pages += global_page_state(NR_ACTIVE);
# endif
# ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE_ANON
pages += global_page_state(NR_ACTIVE_ANON);
# endif
# ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE_FILE
pages += global_page_state(NR_ACTIVE_FILE);
# endif
break;
default:
ASSERT(0); /* Unsupported */
}
return pages;
}
# else
# error "Both global_page_state() and get_zone_counts() unavailable"
# endif /* HAVE_GLOBAL_PAGE_STATE */
#endif /* NEED_GET_ZONE_COUNTS */
EXPORT_SYMBOL(spl_global_page_state);
#ifndef HAVE_SHRINK_DCACHE_MEMORY
shrink_dcache_memory_t shrink_dcache_memory_fn = SYMBOL_POISON;
EXPORT_SYMBOL(shrink_dcache_memory_fn);
@@ -253,15 +120,6 @@ shrink_icache_memory_t shrink_icache_memory_fn = SYMBOL_POISON;
EXPORT_SYMBOL(shrink_icache_memory_fn);
#endif /* HAVE_SHRINK_ICACHE_MEMORY */
pgcnt_t
spl_kmem_availrmem(void)
{
/* The amount of easily available memory */
return (spl_global_page_state(SPL_NR_FREE_PAGES) +
spl_global_page_state(SPL_NR_INACTIVE));
}
EXPORT_SYMBOL(spl_kmem_availrmem);
size_t
vmem_size(vmem_t *vmp, int typemask)
{
@@ -2458,90 +2316,12 @@ spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock)
#define spl_kmem_fini_tracking(list, lock)
#endif /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
static void
spl_kmem_init_globals(void)
{
struct zone *zone;
/* For now all zones are included; it may be wise to restrict
* this to normal and highmem zones if we see problems. */
for_each_zone(zone) {
if (!populated_zone(zone))
continue;
minfree += min_wmark_pages(zone);
desfree += low_wmark_pages(zone);
lotsfree += high_wmark_pages(zone);
}
/* Solaris default values */
swapfs_minfree = MAX(2*1024*1024 >> PAGE_SHIFT, physmem >> 3);
swapfs_reserve = MIN(4*1024*1024 >> PAGE_SHIFT, physmem >> 4);
}
/*
* Called at module init when it is safe to use spl_kallsyms_lookup_name()
*/
int
spl_kmem_init_kallsyms_lookup(void)
{
#ifdef HAVE_PGDAT_HELPERS
# ifndef HAVE_FIRST_ONLINE_PGDAT
first_online_pgdat_fn = (first_online_pgdat_t)
spl_kallsyms_lookup_name("first_online_pgdat");
if (!first_online_pgdat_fn) {
printk(KERN_ERR "Error: Unknown symbol first_online_pgdat\n");
return -EFAULT;
}
# endif /* HAVE_FIRST_ONLINE_PGDAT */
# ifndef HAVE_NEXT_ONLINE_PGDAT
next_online_pgdat_fn = (next_online_pgdat_t)
spl_kallsyms_lookup_name("next_online_pgdat");
if (!next_online_pgdat_fn) {
printk(KERN_ERR "Error: Unknown symbol next_online_pgdat\n");
return -EFAULT;
}
# endif /* HAVE_NEXT_ONLINE_PGDAT */
# ifndef HAVE_NEXT_ZONE
next_zone_fn = (next_zone_t)
spl_kallsyms_lookup_name("next_zone");
if (!next_zone_fn) {
printk(KERN_ERR "Error: Unknown symbol next_zone\n");
return -EFAULT;
}
# endif /* HAVE_NEXT_ZONE */
#else /* HAVE_PGDAT_HELPERS */
# ifndef HAVE_PGDAT_LIST
pgdat_list_addr = *(struct pglist_data **)
spl_kallsyms_lookup_name("pgdat_list");
if (!pgdat_list_addr) {
printk(KERN_ERR "Error: Unknown symbol pgdat_list\n");
return -EFAULT;
}
# endif /* HAVE_PGDAT_LIST */
#endif /* HAVE_PGDAT_HELPERS */
#if defined(NEED_GET_ZONE_COUNTS) && !defined(HAVE_GET_ZONE_COUNTS)
get_zone_counts_fn = (get_zone_counts_t)
spl_kallsyms_lookup_name("get_zone_counts");
if (!get_zone_counts_fn) {
printk(KERN_ERR "Error: Unknown symbol get_zone_counts\n");
return -EFAULT;
}
#endif /* NEED_GET_ZONE_COUNTS && !HAVE_GET_ZONE_COUNTS */
/*
* It is now safe to initialize the global tunings which rely on
* the use of the for_each_zone() macro. This macro in turn
* depends on the *_pgdat symbols which are now available.
*/
spl_kmem_init_globals();
#ifndef HAVE_SHRINK_DCACHE_MEMORY
/* When shrink_dcache_memory_fn == NULL support is disabled */
shrink_dcache_memory_fn = (shrink_dcache_memory_t)


@@ -454,55 +454,6 @@ SPL_PROC_HANDLER(proc_dokallsyms_lookup_name)
}
#endif /* HAVE_KALLSYMS_LOOKUP_NAME */
SPL_PROC_HANDLER(proc_doavailrmem)
{
int len, rc = 0;
char str[32];
SENTRY;
if (write) {
*ppos += *lenp;
} else {
len = snprintf(str, sizeof(str), "%lu",
(unsigned long)availrmem);
if (*ppos >= len)
rc = 0;
else
rc = proc_copyout_string(buffer,*lenp,str+*ppos,"\n");
if (rc >= 0) {
*lenp = rc;
*ppos += rc;
}
}
SRETURN(rc);
}
SPL_PROC_HANDLER(proc_dofreemem)
{
int len, rc = 0;
char str[32];
SENTRY;
if (write) {
*ppos += *lenp;
} else {
len = snprintf(str, sizeof(str), "%lu", (unsigned long)freemem);
if (*ppos >= len)
rc = 0;
else
rc = proc_copyout_string(buffer,*lenp,str+*ppos,"\n");
if (rc >= 0) {
*lenp = rc;
*ppos += rc;
}
}
SRETURN(rc);
}
#ifdef DEBUG_KMEM
static void
slab_seq_show_headers(struct seq_file *f)
@@ -719,71 +670,6 @@ static struct ctl_table spl_debug_table[] = {
};
#endif /* DEBUG_LOG */
static struct ctl_table spl_vm_table[] = {
{
.procname = "minfree",
.data = &minfree,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec,
},
{
.procname = "desfree",
.data = &desfree,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec,
},
{
.procname = "lotsfree",
.data = &lotsfree,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec,
},
{
.procname = "needfree",
.data = &needfree,
.maxlen = sizeof(int),
.mode = 0444,
.proc_handler = &proc_dointvec,
},
{
.procname = "swapfs_minfree",
.data = &swapfs_minfree,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec,
},
{
.procname = "swapfs_reserve",
.data = &swapfs_reserve,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec,
},
{
.procname = "availrmem",
.mode = 0444,
.proc_handler = &proc_doavailrmem,
},
{
.procname = "freemem",
.data = (void *)2,
.maxlen = sizeof(int),
.mode = 0444,
.proc_handler = &proc_dofreemem,
},
{
.procname = "physmem",
.data = &physmem,
.maxlen = sizeof(int),
.mode = 0444,
.proc_handler = &proc_dointvec,
},
{0},
};
#ifdef DEBUG_KMEM
static struct ctl_table spl_kmem_table[] = {
{
@@ -922,11 +808,6 @@ static struct ctl_table spl_table[] = {
.child = spl_debug_table,
},
#endif
{
.procname = "vm",
.mode = 0555,
.child = spl_vm_table,
},
#ifdef DEBUG_KMEM
{
.procname = "kmem",


@@ -1052,9 +1052,8 @@ splat_kmem_test10(struct file *file, void *arg)
for (alloc = 1; alloc <= 1024; alloc *= 2) {
/* Skip tests which exceed available memory. We
* leverage availrmem here for some extra testing */
if (size * alloc * SPLAT_KMEM_THREADS > availrmem / 2)
/* Skip tests which exceed 1/2 of physical memory. */
if (size * alloc * SPLAT_KMEM_THREADS > physmem / 2)
continue;
rc = splat_kmem_cache_thread_test(file, arg,