Merge branch 'cleanup'
Over the years the SPL code base has accumulated compatibility code that allows it to build against a wide range of Linux kernels. In general this is desirable because it keeps the code flexible. However, once support for these old kernels is no longer needed and no longer actively tested, the compatibility code should be removed. This helps keep the code simple and understandable. The spl-0.6.x releases have supported kernels all the way back to 2.6.26. This patch stack moves that cutoff up to 2.6.32 and newer kernels. This still covers all the major enterprise distributions, which are largely locked in to 2.6.32-based kernels, while shedding a large amount of compatibility code and simplifying maintenance and new development.

Signed-off-by: Tim Chase <tim@chase2k.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #395
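Most of the removed code follows a single pattern: a configure-time HAVE_* check selects between an old-kernel workaround and the current kernel API, and once every supported kernel provides the API the wrapper collapses to a direct call and the autoconf test can be dropped. A minimal sketch of that pattern, where spl_example_fsync, HAVE_EXAMPLE_API, and the two callees are purely illustrative names and not part of this patch:

/*
 * Before the cleanup: both branches must be carried, built, and tested.
 */
#ifdef HAVE_EXAMPLE_API                  /* hypothetical configure check */
#define spl_example_fsync(fp)   example_new_api(fp)
#else
#define spl_example_fsync(fp)   example_old_workaround(fp)
#endif

/*
 * After the cleanup: every kernel >= 2.6.32 has the API, so the check,
 * the fallback branch, and the matching SPL_AC_* test all go away.
 */
#define spl_example_fsync(fp)   example_new_api(fp)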
commit 46c936756e

META | 2
@@ -4,3 +4,5 @@ Branch: 1.0
Version: 0.6.3
Release: 1
Release-Tags: relext
License: GPL
Author: OpenZFS on Linux

config/spl-build.m4 | 1067
(file diff suppressed because it is too large)

@@ -76,6 +76,14 @@ AC_DEFUN([SPL_AC_META], [
AC_SUBST([RELEASE])
fi

SPL_META_LICENSE=_SPL_AC_META_GETVAL([License]);
if test -n "$SPL_META_LICENSE"; then
	AC_DEFINE_UNQUOTED([SPL_META_LICENSE], ["$SPL_META_LICENSE"],
		[Define the project license.]
	)
	AC_SUBST([SPL_META_LICENSE])
fi

if test -n "$SPL_META_NAME" -a -n "$SPL_META_VERSION"; then
	SPL_META_ALIAS="$SPL_META_NAME-$SPL_META_VERSION"
	test -n "$SPL_META_RELEASE" &&

@@ -6,7 +6,6 @@ KERNEL_H = \
$(top_srcdir)/include/splat-ctl.h \
$(top_srcdir)/include/spl-ctl.h \
$(top_srcdir)/include/spl-debug.h \
$(top_srcdir)/include/spl-device.h \
$(top_srcdir)/include/spl-trace.h \
$(top_srcdir)/include/strings.h \
$(top_srcdir)/include/unistd.h

@@ -5,18 +5,12 @@ KERNEL_H = \
$(top_srcdir)/include/linux/compiler_compat.h \
$(top_srcdir)/include/linux/delay_compat.h \
$(top_srcdir)/include/linux/file_compat.h \
$(top_srcdir)/include/linux/kallsyms_compat.h \
$(top_srcdir)/include/linux/list_compat.h \
$(top_srcdir)/include/linux/math64_compat.h \
$(top_srcdir)/include/linux/mm_compat.h \
$(top_srcdir)/include/linux/module_compat.h \
$(top_srcdir)/include/linux/mutex_compat.h \
$(top_srcdir)/include/linux/proc_compat.h \
$(top_srcdir)/include/linux/rwsem_compat.h \
$(top_srcdir)/include/linux/smp_compat.h \
$(top_srcdir)/include/linux/sysctl_compat.h \
$(top_srcdir)/include/linux/time_compat.h \
$(top_srcdir)/include/linux/uaccess_compat.h \
$(top_srcdir)/include/linux/wait_compat.h \
$(top_srcdir)/include/linux/zlib_compat.h

@@ -27,17 +27,5 @@

#include <linux/bitops.h>

#ifndef HAVE_FLS64

static inline int fls64(__u64 x)
{
	__u32 h = x >> 32;
	if (h)
		return fls(h) + 32;
	return fls(x);
}

#endif /* HAVE_FLS64 */

#endif /* _SPL_BITOPS_COMPAT_H */

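The open-coded fls64() fallback removed above was only needed on kernels that predate the generic helper; every 2.6.32+ kernel provides fls64() in <linux/bitops.h>. As a rough illustration of the semantics the SPL relies on (find-last-set, 1-based bit index, 0 when no bit is set), a minimal user-space sketch using hypothetical helper names:

#include <stdio.h>
#include <stdint.h>

/* Same shape as the removed fallback: fold the high word into a 32-bit fls. */
static int my_fls(uint32_t x)            /* position of highest set bit, 1-based */
{
	int h = 0;
	while (x) { h++; x >>= 1; }
	return h;
}

static int my_fls64(uint64_t x)
{
	uint32_t h = x >> 32;
	return h ? my_fls(h) + 32 : my_fls((uint32_t)x);
}

int main(void)
{
	printf("%d %d %d\n", my_fls64(0), my_fls64(1), my_fls64(1ULL << 40));
	/* prints: 0 1 41 */
	return 0;
}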
@@ -59,37 +59,25 @@ spl_filp_fallocate(struct file *fp, int mode, loff_t offset, loff_t len)
	if (fp->f_op->fallocate)
		error = fp->f_op->fallocate(fp, mode, offset, len);
#else
# ifdef HAVE_INODE_FALLOCATE
#ifdef HAVE_INODE_FALLOCATE
	if (fp->f_dentry && fp->f_dentry->d_inode &&
	    fp->f_dentry->d_inode->i_op->fallocate)
		error = fp->f_dentry->d_inode->i_op->fallocate(
		    fp->f_dentry->d_inode, mode, offset, len);
# endif /* HAVE_INODE_FALLOCATE */
#endif /* HAVE_INODE_FALLOCATE */
#endif /* HAVE_FILE_FALLOCATE */

	return (error);
}

#ifdef HAVE_VFS_FSYNC
# ifdef HAVE_2ARGS_VFS_FSYNC
#  define spl_filp_fsync(fp, sync)	vfs_fsync(fp, sync)
# else
#  define spl_filp_fsync(fp, sync)	vfs_fsync(fp, (fp)->f_dentry, sync)
# endif /* HAVE_2ARGS_VFS_FSYNC */
#ifdef HAVE_2ARGS_VFS_FSYNC
#define spl_filp_fsync(fp, sync)	vfs_fsync(fp, sync)
#else
# include <linux/buffer_head.h>
# define spl_filp_fsync(fp, sync)	file_fsync(fp, (fp)->f_dentry, sync)
#endif /* HAVE_VFS_FSYNC */
#define spl_filp_fsync(fp, sync)	vfs_fsync(fp, (fp)->f_dentry, sync)
#endif /* HAVE_2ARGS_VFS_FSYNC */

#ifdef HAVE_INODE_I_MUTEX
#define spl_inode_lock(ip)		(mutex_lock(&(ip)->i_mutex))
#define spl_inode_lock_nested(ip, type)	(mutex_lock_nested((&(ip)->i_mutex), \
					(type)))
#define spl_inode_unlock(ip)		(mutex_unlock(&(ip)->i_mutex))
#else
#define spl_inode_lock(ip)		(down(&(ip)->i_sem))
#define spl_inode_unlock(ip)		(up(&(ip)->i_sem))
#endif /* HAVE_INODE_I_MUTEX */
#define spl_inode_lock(ip)		mutex_lock(&(ip)->i_mutex)
#define spl_inode_unlock(ip)		mutex_unlock(&(ip)->i_mutex)

#endif /* SPL_FILE_COMPAT_H */

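These wrappers are what the rest of the SPL uses to serialize against an inode or flush a file; with the pre-2.6.32 branches gone they reduce to the plain i_mutex lock and to vfs_fsync(), whose argument count is still selected by the retained HAVE_2ARGS_VFS_FSYNC check. A hedged sketch of a hypothetical caller (the function name is illustrative, not from the patch):

#include <linux/file_compat.h>

/* Hypothetical sketch: flush a file while holding its inode lock. */
static int
example_flush(struct file *fp)
{
	struct inode *ip = fp->f_dentry->d_inode;
	int error;

	spl_inode_lock(ip);              /* always mutex_lock(&ip->i_mutex) now */
	error = spl_filp_fsync(fp, 1);   /* vfs_fsync(), 2 or 3 args per kernel */
	spl_inode_unlock(ip);

	return (error);
}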
@@ -1,44 +0,0 @@
/*****************************************************************************\
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
\*****************************************************************************/

#ifndef _SPL_KALLSYMS_COMPAT_H
#define _SPL_KALLSYMS_COMPAT_H

#define SYMBOL_POISON ((void*)0xabcddcba)

#ifdef HAVE_KALLSYMS_LOOKUP_NAME

#include <linux/kallsyms.h>
#define spl_kallsyms_lookup_name(name) kallsyms_lookup_name(name)

#else

extern wait_queue_head_t spl_kallsyms_lookup_name_waitq;
typedef unsigned long (*kallsyms_lookup_name_t)(const char *);
extern kallsyms_lookup_name_t spl_kallsyms_lookup_name_fn;
#define spl_kallsyms_lookup_name(name) spl_kallsyms_lookup_name_fn(name)

#endif /* HAVE_KALLSYMS_LOOKUP_NAME */

#endif /* _SPL_KALLSYMS_COMPAT_H */

@@ -28,22 +28,6 @@
#include <linux/mm.h>
#include <linux/fs.h>

/*
 * Linux 2.6.31 API Change.
 * Individual pages_{min,low,high} moved in to watermark array.
 */
#ifndef min_wmark_pages
#define min_wmark_pages(z)	(z->pages_min)
#endif

#ifndef low_wmark_pages
#define low_wmark_pages(z)	(z->pages_low)
#endif

#ifndef high_wmark_pages
#define high_wmark_pages(z)	(z->pages_high)
#endif

#if !defined(HAVE_SHRINK_CONTROL_STRUCT)
struct shrink_control {
	gfp_t gfp_mask;
@@ -51,102 +35,6 @@ struct shrink_control {
};
#endif /* HAVE_SHRINK_CONTROL_STRUCT */

/*
 * 2.6.xx API compat,
 * There currently exists no exposed API to partially shrink the dcache.
 * The expected mechanism to shrink the cache is a registered shrinker
 * which is called during memory pressure.
 */
#ifndef HAVE_SHRINK_DCACHE_MEMORY
# if defined(HAVE_SHRINK_CONTROL_STRUCT)
typedef int (*shrink_dcache_memory_t)(struct shrinker *,
    struct shrink_control *);
extern shrink_dcache_memory_t shrink_dcache_memory_fn;
# define shrink_dcache_memory(nr, gfp) \
({ \
	struct shrink_control sc = { .nr_to_scan = nr, .gfp_mask = gfp }; \
	int __ret__ = 0; \
\
	if (shrink_dcache_memory_fn) \
		__ret__ = shrink_dcache_memory_fn(NULL, &sc); \
\
	__ret__; \
})
# elif defined(HAVE_3ARGS_SHRINKER_CALLBACK)
typedef int (*shrink_dcache_memory_t)(struct shrinker *, int, gfp_t);
extern shrink_dcache_memory_t shrink_dcache_memory_fn;
# define shrink_dcache_memory(nr, gfp) \
({ \
	int __ret__ = 0; \
\
	if (shrink_dcache_memory_fn) \
		__ret__ = shrink_dcache_memory_fn(NULL, nr, gfp); \
\
	__ret__; \
})
# else
typedef int (*shrink_dcache_memory_t)(int, gfp_t);
extern shrink_dcache_memory_t shrink_dcache_memory_fn;
# define shrink_dcache_memory(nr, gfp) \
({ \
	int __ret__ = 0; \
\
	if (shrink_dcache_memory_fn) \
		__ret__ = shrink_dcache_memory_fn(nr, gfp); \
\
	__ret__; \
})
# endif /* HAVE_3ARGS_SHRINKER_CALLBACK */
#endif /* HAVE_SHRINK_DCACHE_MEMORY */

/*
 * 2.6.xx API compat,
 * There currently exists no exposed API to partially shrink the icache.
 * The expected mechanism to shrink the cache is a registered shrinker
 * which is called during memory pressure.
 */
#ifndef HAVE_SHRINK_ICACHE_MEMORY
# if defined(HAVE_SHRINK_CONTROL_STRUCT)
typedef int (*shrink_icache_memory_t)(struct shrinker *,
    struct shrink_control *);
extern shrink_icache_memory_t shrink_icache_memory_fn;
# define shrink_icache_memory(nr, gfp) \
({ \
	struct shrink_control sc = { .nr_to_scan = nr, .gfp_mask = gfp }; \
	int __ret__ = 0; \
\
	if (shrink_icache_memory_fn) \
		__ret__ = shrink_icache_memory_fn(NULL, &sc); \
\
	__ret__; \
})
# elif defined(HAVE_3ARGS_SHRINKER_CALLBACK)
typedef int (*shrink_icache_memory_t)(struct shrinker *, int, gfp_t);
extern shrink_icache_memory_t shrink_icache_memory_fn;
# define shrink_icache_memory(nr, gfp) \
({ \
	int __ret__ = 0; \
\
	if (shrink_icache_memory_fn) \
		__ret__ = shrink_icache_memory_fn(NULL, nr, gfp); \
\
	__ret__; \
})
# else
typedef int (*shrink_icache_memory_t)(int, gfp_t);
extern shrink_icache_memory_t shrink_icache_memory_fn;
# define shrink_icache_memory(nr, gfp) \
({ \
	int __ret__ = 0; \
\
	if (shrink_icache_memory_fn) \
		__ret__ = shrink_icache_memory_fn(nr, gfp); \
\
	__ret__; \
})
# endif /* HAVE_3ARGS_SHRINKER_CALLBACK */
#endif /* HAVE_SHRINK_ICACHE_MEMORY */

/*
 * Due to frequent changes in the shrinker API the following
 * compatibility wrappers should be used.  They are as follows:

@@ -1,36 +0,0 @@
(standard SPL GPL license header, identical to the one shown above)

#ifndef _SPL_MUTEX_COMPAT_H
#define _SPL_MUTEX_COMPAT_H

#include <linux/mutex.h>

/* mutex_lock_nested() introduced in 2.6.18 */
#ifndef HAVE_MUTEX_LOCK_NESTED
# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
#endif /* HAVE_MUTEX_LOCK_NESTED */

#endif /* _SPL_MUTEX_COMPAT_H */

@@ -27,21 +27,6 @@

#include <linux/proc_fs.h>

#ifdef CONFIG_SYSCTL
#ifdef HAVE_2ARGS_REGISTER_SYSCTL
#define spl_register_sysctl_table(t, a)	register_sysctl_table(t, a)
#else
#define spl_register_sysctl_table(t, a)	register_sysctl_table(t)
#endif /* HAVE_2ARGS_REGISTER_SYSCTL */
#define spl_unregister_sysctl_table(t)	unregister_sysctl_table(t)
#endif /* CONFIG_SYSCTL */

#ifdef HAVE_CTL_NAME
#define CTL_NAME(cname)	.ctl_name = (cname),
#else
#define CTL_NAME(cname)
#endif

extern struct proc_dir_entry *proc_spl_kstat;

int spl_proc_init(void);

@@ -1,40 +0,0 @@
(standard SPL GPL license header, identical to the one shown above)

#ifndef _SPL_SMP_COMPAT_H
#define _SPL_SMP_COMPAT_H

#include <linux/smp.h>

#ifdef HAVE_3ARGS_ON_EACH_CPU

#define spl_on_each_cpu(func,info,wait)	on_each_cpu(func,info,wait)

#else

#define spl_on_each_cpu(func,info,wait)	on_each_cpu(func,info,0,wait)

#endif /* HAVE_3ARGS_ON_EACH_CPU */

#endif /* _SPL_SMP_COMPAT_H */
@@ -1,96 +0,0 @@
(standard SPL GPL license header, identical to the one shown above)

#ifndef _SPL_SYSCTL_COMPAT_H
#define _SPL_SYSCTL_COMPAT_H

#include <linux/sysctl.h>

/* proc_handler() / proc_do* API changes
 * 2.6.x  - 2.6.31: 6 args, prototype includes 'struct file *'
 * 2.6.32 - 2.6.y:  5 args, removed unused 'struct file *' from prototype
 *
 * Generic SPL_PROC_HANDLER() macro should be used for correct prototypes.
 * It will define the following function arguments which can and should be
 * used with the spl_proc_* helper macros.
 *
 *   struct ctl_table *table,
 *   int write,
 *   struct file *filp [2.6.31 and earlier kernels],
 *   void __user *buffer,
 *   size_t *lenp,
 *   loff_t *ppos,
 */
#ifdef HAVE_5ARGS_PROC_HANDLER

#define SPL_PROC_HANDLER(proc_handler) \
static int \
proc_handler(struct ctl_table *table, int write, \
    void __user *buffer, size_t *lenp, loff_t *ppos)

#define spl_proc_dostring(table, write, filp, buffer, lenp, ppos) \
	proc_dostring(table, write, buffer, lenp, ppos)
#define spl_proc_dointvec(table, write, filp, buffer, lenp, ppos) \
	proc_dointvec(table, write, buffer, lenp, ppos)
#define spl_proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos) \
	proc_dointvec_minmax(table, write, buffer, lenp, ppos)
#define spl_proc_dointvec_jiffies(table, write, filp, buffer, lenp, ppos) \
	proc_dointvec_jiffies(table, write, buffer, lenp, ppos)
#define spl_proc_dointvec_userhz_jiffies(table,write,filp,buffer,lenp,ppos) \
	proc_dointvec_userhz_jiffies(table, write, buffer, lenp, ppos)
#define spl_proc_dointvec_ms_jiffies(table,write,filp,buffer,lenp,ppos) \
	proc_dointvec_ms_jiffies(table, write, buffer, lenp, ppos)
#define spl_proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos) \
	proc_doulongvec_minmax(table, write, buffer, lenp, ppos)
#define spl_proc_doulongvec_ms_jiffies_minmax(table,write,filp,buffer,lenp,ppos)\
	proc_doulongvec_ms_jiffies_minmax(table, write, buffer, lenp, ppos)

#else /* HAVE_5ARGS_PROC_HANDLER */

#define SPL_PROC_HANDLER(proc_handler) \
static int \
proc_handler(struct ctl_table *table, int write, struct file *filp, \
    void __user *buffer, size_t *lenp, loff_t *ppos)

#define spl_proc_dostring(table, write, filp, buffer, lenp, ppos) \
	proc_dostring(table, write, filp, buffer, lenp, ppos)
#define spl_proc_dointvec(table, write, filp, buffer, lenp, ppos) \
	proc_dointvec(table, write, filp, buffer, lenp, ppos)
#define spl_proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos) \
	proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos)
#define spl_proc_dointvec_jiffies(table, write, filp, buffer, lenp, ppos) \
	proc_dointvec_jiffies(table, write, filp, buffer, lenp, ppos)
#define spl_proc_dointvec_userhz_jiffies(table,write,filp,buffer,lenp,ppos) \
	proc_dointvec_userhz_jiffies(table, write, filp, buffer, lenp, ppos)
#define spl_proc_dointvec_ms_jiffies(table, write, filp, buffer, lenp, ppos) \
	proc_dointvec_ms_jiffies(table, write, filp, buffer, lenp, ppos)
#define spl_proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos) \
	proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos)
#define spl_proc_doulongvec_ms_jiffies_minmax(table,write,filp,buffer,lenp,ppos) \
	proc_doulongvec_ms_jiffies_minmax(table,write,filp,buffer,lenp,ppos)


#endif /* HAVE_5ARGS_PROC_HANDLER */

#endif /* _SPL_SYSCTL_COMPAT_H */

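With only 2.6.32 and newer supported, every kernel uses the five-argument proc_handler prototype, so the SPL_PROC_HANDLER() indirection and the spl_proc_* shims above can go and handlers can be written directly against the kernel API. A minimal hedged sketch; the module name, table, and tunable are illustrative, not from this patch:

#include <linux/sysctl.h>

static int spl_example_value;          /* hypothetical tunable */

/* The 2.6.32+ prototype the SPL can now rely on everywhere. */
static int
proc_doexample(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	/* Delegate to the stock integer handler; no filp argument needed. */
	return (proc_dointvec(table, write, buffer, lenp, ppos));
}

static struct ctl_table spl_example_table[] = {
	{
		.procname	= "example",
		.data		= &spl_example_value,
		.maxlen		= sizeof (int),
		.mode		= 0644,
		.proc_handler	= &proc_doexample,
	},
	{},
};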
@@ -1,45 +0,0 @@
(standard SPL GPL license header, identical to the one shown above)

#ifndef _SPL_TIME_COMPAT_H
#define _SPL_TIME_COMPAT_H

#include <linux/time.h>

/* timespec_sub() API changes
 * 2.6.18 - 2.6.x: Inline function provided by linux/time.h
 */
#ifndef HAVE_TIMESPEC_SUB
static inline struct timespec
timespec_sub(struct timespec lhs, struct timespec rhs)
{
	struct timespec ts_delta;
	set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec,
	    lhs.tv_nsec - rhs.tv_nsec);
	return ts_delta;
}
#endif /* HAVE_TIMESPEC_SUB */

#endif /* _SPL_TIME_COMPAT_H */

@@ -1,35 +0,0 @@
(standard SPL GPL license header, identical to the one shown above)

#ifndef _SPL_UACCESS_COMPAT_H
#define _SPL_UACCESS_COMPAT_H

#ifdef HAVE_UACCESS_HEADER
#include <linux/uaccess.h>
#else
#include <asm/uaccess.h>
#endif

#endif /* _SPL_UACCESS_COMPAT_H */

@@ -1,90 +0,0 @@
(standard SPL GPL license header, identical to the one shown above)

#ifndef _SPL_DEVICE_H
#define _SPL_DEVICE_H

#include <linux/device.h>

/*
 * Preferred API from 2.6.18 to 2.6.26+
 */
#ifdef HAVE_DEVICE_CREATE

typedef struct class			spl_class;
typedef struct device			spl_device;

#define spl_class_create(mod, name)	class_create(mod, name)
#define spl_class_destroy(cls)		class_destroy(cls)

# ifdef HAVE_5ARGS_DEVICE_CREATE
# define spl_device_create(cls, parent, devt, drvdata, fmt, args...) \
	device_create(cls, parent, devt, drvdata, fmt, ## args)
# else
# define spl_device_create(cls, parent, devt, drvdata, fmt, args...) \
	device_create(cls, parent, devt, fmt, ## args)
# endif

#define spl_device_destroy(cls, cls_dev, devt) \
	device_destroy(cls, devt)

/*
 * Preferred API from 2.6.13 to 2.6.17
 * Depricated in 2.6.18
 * Removed in 2.6.26
 */
#else
#ifdef HAVE_CLASS_DEVICE_CREATE

typedef struct class			spl_class;
typedef struct class_device		spl_device;

#define spl_class_create(mod, name)	class_create(mod, name)
#define spl_class_destroy(cls)		class_destroy(cls)
#define spl_device_create(cls, parent, devt, device, fmt, args...) \
	class_device_create(cls, parent, devt, device, fmt, ## args)
#define spl_device_destroy(cls, cls_dev, devt) \
	class_device_unregister(cls_dev)

/*
 * Prefered API from 2.6.0 to 2.6.12
 * Depricated in 2.6.13
 * Removed in 2.6.13
 */
#else /* Legacy API */

typedef struct class_simple		spl_class;
typedef struct class_device		spl_class_device;

#define spl_class_create(mod, name)	class_simple_create(mod, name)
#define spl_class_destroy(cls)		class_simple_destroy(cls)
#define spl_device_create(cls, parent, devt, device, fmt, args...) \
	class_simple_device_add(cls, devt, device, fmt, ## args)
#define spl_device_destroy(cls, cls_dev, devt) \
	class_simple_device_remove(devt)

#endif /* HAVE_CLASS_DEVICE_CREATE */
#endif /* HAVE_DEVICE_CREATE */

#endif /* _SPL_DEVICE_H */
@@ -32,8 +32,6 @@
 * ensure 32-bit/64-bit interoperability over ioctl()'s only types with
 * fixed sizes can be used.
 */
#define SPLAT_MAJOR			225 /* XXX - Arbitrary */
#define SPLAT_MINORS			1
#define SPLAT_NAME			"splatctl"
#define SPLAT_DEV			"/dev/splatctl"

@@ -90,7 +90,6 @@ KERNEL_H = \
$(top_srcdir)/include/sys/u8_textprep.h \
$(top_srcdir)/include/sys/uio.h \
$(top_srcdir)/include/sys/unistd.h \
$(top_srcdir)/include/sys/utsname.h \
$(top_srcdir)/include/sys/va_list.h \
$(top_srcdir)/include/sys/varargs.h \
$(top_srcdir)/include/sys/vfs.h \

@@ -29,14 +29,6 @@
#include <linux/spinlock.h>
#include <sys/types.h>

#ifndef HAVE_ATOMIC64_CMPXCHG
#define atomic64_cmpxchg(v, o, n)	(cmpxchg(&((v)->counter), (o), (n)))
#endif

#ifndef HAVE_ATOMIC64_XCHG
#define atomic64_xchg(v, n)		(xchg(&((v)->counter), n))
#endif

/*
 * Two approaches to atomic operations are implemented each with its
 * own benefits are drawbacks imposed by the Solaris API.  Neither

@@ -29,49 +29,38 @@
#include <sys/types.h>
#include <sys/vfs.h>

#ifdef HAVE_CRED_STRUCT

typedef struct cred cred_t;

#define kcred		((cred_t *)(init_task.cred))
#define CRED()		((cred_t *)current_cred())

#else

typedef struct task_struct cred_t;

#define kcred		((cred_t *)&init_task)
#define CRED()		((cred_t *)current)

#endif /* HAVE_CRED_STRUCT */
#define kcred		((cred_t *)(init_task.cred))
#define CRED()		((cred_t *)current_cred())

#ifdef HAVE_KUIDGID_T

/*
 * Linux 3.8+ uses typedefs to redefine uid_t and gid_t. We have to rename the
 * typedefs to recover the original types. We then can use them provided that
 * we are careful about translating from k{g,u}id_t to the original versions
 * and vice versa.
 */
#define uid_t xuid_t
#define gid_t xgid_t
#include <linux/uidgid.h>
#undef uid_t
#undef gid_t
/*
 * Linux 3.8+ uses typedefs to redefine uid_t and gid_t. We have to rename the
 * typedefs to recover the original types. We then can use them provided that
 * we are careful about translating from k{g,u}id_t to the original versions
 * and vice versa.
 */
#define uid_t xuid_t
#define gid_t xgid_t
#include <linux/uidgid.h>
#undef uid_t
#undef gid_t

#define KUID_TO_SUID(x)		(__kuid_val(x))
#define KGID_TO_SGID(x)		(__kgid_val(x))
#define SUID_TO_KUID(x)		(KUIDT_INIT(x))
#define SGID_TO_KGID(x)		(KGIDT_INIT(x))
#define KGIDP_TO_SGIDP(x)	(&(x)->val)
#define KUID_TO_SUID(x)		(__kuid_val(x))
#define KGID_TO_SGID(x)		(__kgid_val(x))
#define SUID_TO_KUID(x)		(KUIDT_INIT(x))
#define SGID_TO_KGID(x)		(KGIDT_INIT(x))
#define KGIDP_TO_SGIDP(x)	(&(x)->val)

#else /* HAVE_KUIDGID_T */

#define KUID_TO_SUID(x)		(x)
#define KGID_TO_SGID(x)		(x)
#define SUID_TO_KUID(x)		(x)
#define SGID_TO_KGID(x)		(x)
#define KGIDP_TO_SGIDP(x)	(x)
#define KUID_TO_SUID(x)		(x)
#define KGID_TO_SGID(x)		(x)
#define SUID_TO_KUID(x)		(x)
#define SGID_TO_KGID(x)		(x)
#define KGIDP_TO_SGIDP(x)	(x)

#endif /* HAVE_KUIDGID_T */

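With HAVE_CRED_STRUCT now assumed everywhere, cred_t maps straight onto struct cred and the k{u,g}id translation macros are the only remaining indirection. A hedged sketch of how a caller converts a kernel credential field to a Solaris-style id; the helper name is illustrative, not from the patch:

/* Hypothetical helper: return the effective uid of a credential as a plain uid_t. */
static uid_t
example_crgetuid(const cred_t *cr)
{
	/* On HAVE_KUIDGID_T kernels KUID_TO_SUID() unwraps the kuid_t via __kuid_val(). */
	return (KUID_TO_SUID(cr->euid));
}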
@@ -25,22 +25,4 @@
#ifndef _SPL_DNLC_H
#define _SPL_DNLC_H

/*
 * Reduce the dcache and icache then reap the free'd slabs.  Note the
 * interface takes a reclaim percentage but we don't have easy access to
 * the total number of entries to calculate the reclaim count.  However,
 * in practice this doesn't need to be even close to correct.  We simply
 * need to reclaim some useful fraction of the cache.  The caller can
 * determine if more needs to be done.
 */
static inline void
dnlc_reduce_cache(void *reduce_percent)
{
	int nr = (uintptr_t)reduce_percent * 10000;

	shrink_dcache_memory(nr, GFP_KERNEL);
	shrink_icache_memory(nr, GFP_KERNEL);
	kmem_reap();
}

#endif /* SPL_DNLC_H */

@@ -135,7 +135,6 @@ kzalloc_nofail(size_t size, gfp_t flags)
static inline void *
kmalloc_node_nofail(size_t size, gfp_t flags, int node)
{
#ifdef HAVE_KMALLOC_NODE
	void *ptr;

	sanitize_flags(current, &flags);

@@ -145,9 +144,6 @@ kmalloc_node_nofail(size_t size, gfp_t flags, int node)
	} while (ptr == NULL && (flags & __GFP_WAIT));

	return ptr;
#else
	return kmalloc_nofail(size, flags);
#endif /* HAVE_KMALLOC_NODE */
}

static inline void *

@@ -502,7 +498,6 @@ extern void spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj);
extern void spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count);
extern void spl_kmem_reap(void);

int spl_kmem_init_kallsyms_lookup(void);
int spl_kmem_init(void);
void spl_kmem_fini(void);

@@ -35,7 +35,8 @@ typedef enum {
	MUTEX_ADAPTIVE = 2
} kmutex_type_t;

#if defined(HAVE_MUTEX_OWNER) && defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
#if defined(HAVE_MUTEX_OWNER) && defined(CONFIG_SMP) && \
    !defined(CONFIG_DEBUG_MUTEXES)

/*
 * We define a 1-field struct rather than a straight typedef to enforce type

@@ -82,15 +83,9 @@ mutex_owner(kmutex_t *mp)
({ \
	ASSERT3P(mutex_owner(mp), !=, current); \
	mutex_lock(&(mp)->m); \
})
})
#define mutex_exit(mp)		mutex_unlock(&(mp)->m)

#ifdef HAVE_GPL_ONLY_SYMBOLS
# define mutex_enter_nested(mp, sc)	mutex_lock_nested(&(mp)->m, sc)
#else
# define mutex_enter_nested(mp, sc)	mutex_enter(mp)
#endif /* HAVE_GPL_ONLY_SYMBOLS */

#else /* HAVE_MUTEX_OWNER */

typedef struct {

@@ -98,13 +93,6 @@ typedef struct {
	kthread_t	*m_owner;
} kmutex_t;

#ifdef HAVE_TASK_CURR
extern int spl_mutex_spin_max(void);
#else /* HAVE_TASK_CURR */
# define task_curr(owner)	0
# define spl_mutex_spin_max()	0
#endif /* HAVE_TASK_CURR */

#define MUTEX(mp)		(&((mp)->m_mutex))

static inline void

@@ -156,39 +144,11 @@ spl_mutex_clear_owner(kmutex_t *mp)
	_rc_; \
})

/*
 * Adaptive mutexs assume that the lock may be held by a task running
 * on a different cpu.  The expectation is that the task will drop the
 * lock before leaving the head of the run queue.  So the ideal thing
 * to do is spin until we acquire the lock and avoid a context switch.
 * However it is also possible the task holding the lock yields the
 * processor with out dropping lock.  In this case, we know it's going
 * to be a while so we stop spinning and go to sleep waiting for the
 * lock to be available.  This should strike the optimum balance
 * between spinning and sleeping waiting for a lock.
 */
#define mutex_enter(mp) \
({ \
	kthread_t *_owner_; \
	int _rc_, _count_; \
\
	_rc_ = 0; \
	_count_ = 0; \
	_owner_ = mutex_owner(mp); \
	ASSERT3P(_owner_, !=, current); \
\
	while (_owner_ && task_curr(_owner_) && \
	    _count_ <= spl_mutex_spin_max()) { \
		if ((_rc_ = mutex_trylock(MUTEX(mp)))) \
			break; \
\
		_count_++; \
	} \
\
	if (!_rc_) \
		mutex_lock(MUTEX(mp)); \
\
	spl_mutex_set_owner(mp); \
	ASSERT3P(mutex_owner(mp), !=, current); \
	mutex_lock(MUTEX(mp)); \
	spl_mutex_set_owner(mp); \
})

#define mutex_exit(mp) \

@@ -197,19 +157,6 @@ spl_mutex_clear_owner(kmutex_t *mp)
	mutex_unlock(MUTEX(mp)); \
})

#ifdef HAVE_GPL_ONLY_SYMBOLS
# define mutex_enter_nested(mp, sc) \
({ \
	mutex_lock_nested(MUTEX(mp), sc); \
	spl_mutex_set_owner(mp); \
})
#else
# define mutex_enter_nested(mp, sc) \
({ \
	mutex_enter(mp); \
})
#endif

#endif /* HAVE_MUTEX_OWNER */

int spl_mutex_init(void);

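The simplification above drops the hand-rolled adaptive spin loop: on HAVE_MUTEX_OWNER kernels mutex_enter() now reduces to a plain mutex_lock() plus owner bookkeeping. Callers are unaffected. A hedged sketch of typical SPL-style usage, with the structure and field names purely illustrative:

#include <sys/mutex.h>

/* Hypothetical consumer of the SPL mutex API; behavior is unchanged by the patch. */
typedef struct example_stats {
	kmutex_t	es_lock;
	uint64_t	es_count;
} example_stats_t;

static void
example_bump(example_stats_t *es)
{
	mutex_enter(&es->es_lock);	/* a straight mutex_lock() underneath */
	es->es_count++;
	mutex_exit(&es->es_lock);
}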
@@ -31,7 +31,6 @@
#include <sys/mutex.h>
#include <sys/u8_textprep.h>
#include <sys/vnode.h>
#include <spl-device.h>

typedef int ddi_devid_t;

@@ -152,13 +152,12 @@ extern char spl_version[32];
extern unsigned long spl_hostid;

/* Missing misc functions */
extern int highbit(unsigned long i);
extern int highbit64(uint64_t i);
extern uint32_t zone_get_hostid(void *zone);
extern void spl_setup(void);
extern void spl_cleanup(void);

#define makedevice(maj,min) makedev(maj,min)
#define highbit64(x) fls64(x)
#define makedevice(maj,min) makedev(maj,min)

/* common macros */
#ifndef MIN
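The open-coded highbit64() export in spl-generic.c (removed further down) is replaced by the one-line mapping above: on 2.6.32 and newer the kernel's fls64() already returns the 1-based index of the highest set bit, which matches the Solaris highbit64() semantics. A hedged sketch of a hypothetical caller built on that mapping:

#include <sys/sysmacros.h>	/* highbit64(), now fls64() underneath */

/*
 * Hypothetical caller: round a byte count up to the next power of two
 * using highbit64() instead of the removed open-coded bit loop.
 */
static inline uint64_t
example_roundup_pow2(uint64_t size)
{
	if (size == 0 || (size & (size - 1)) == 0)
		return (size);			/* already zero or a power of two */

	return (1ULL << highbit64(size));	/* highbit64(x) == fls64(x) */
}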
@@ -23,74 +23,57 @@
\*****************************************************************************/

#ifndef _SPL_TIME_H
#define _SPL_TIME_H
#define _SPL_TIME_H

/*
 * Structure returned by gettimeofday(2) system call,
 * and used in other calls.
 */
#include <linux/module.h>
#include <linux/time.h>
#include <sys/types.h>
#include <sys/timer.h>

#if defined(CONFIG_64BIT)
#define TIME_MAX	INT64_MAX
#define TIME_MIN	INT64_MIN
#define TIME_MAX	INT64_MAX
#define TIME_MIN	INT64_MIN
#else
#define TIME_MAX	INT32_MAX
#define TIME_MIN	INT32_MIN
#define TIME_MAX	INT32_MAX
#define TIME_MIN	INT32_MIN
#endif

#define SEC		1
#define MILLISEC	1000
#define MICROSEC	1000000
#define NANOSEC		1000000000
#define SEC		1
#define MILLISEC	1000
#define MICROSEC	1000000
#define NANOSEC		1000000000

#define MSEC2NSEC(m)	((hrtime_t)(m) * (NANOSEC / MILLISEC))
#define NSEC2MSEC(n)	((n) / (NANOSEC / MILLISEC))

/* Already defined in include/linux/time.h */
#undef CLOCK_THREAD_CPUTIME_ID
#undef CLOCK_REALTIME
#undef CLOCK_MONOTONIC
#undef CLOCK_PROCESS_CPUTIME_ID
#define hz	HZ

typedef enum clock_type {
	__CLOCK_REALTIME0 =		0,	/* obsolete; same as CLOCK_REALTIME */
	CLOCK_VIRTUAL =			1,	/* thread's user-level CPU clock */
	CLOCK_THREAD_CPUTIME_ID =	2,	/* thread's user+system CPU clock */
	CLOCK_REALTIME =		3,	/* wall clock */
	CLOCK_MONOTONIC =		4,	/* high resolution monotonic clock */
	CLOCK_PROCESS_CPUTIME_ID =	5,	/* process's user+system CPU clock */
	CLOCK_HIGHRES =			CLOCK_MONOTONIC,	/* alternate name */
	CLOCK_PROF =			CLOCK_THREAD_CPUTIME_ID,/* alternate name */
} clock_type_t;

#define hz \
({ \
	ASSERT(HZ >= 100 && HZ <= MICROSEC); \
	HZ; \
})

extern void __gethrestime(timestruc_t *);
extern int __clock_gettime(clock_type_t, timespec_t *);
extern hrtime_t __gethrtime(void);

#define gethrestime(ts)		__gethrestime(ts)
#define clock_gettime(fl, tp)	__clock_gettime(fl, tp)
#define gethrtime()		__gethrtime()

static __inline__ time_t
gethrestime_sec(void)
{
	timestruc_t now;

	__gethrestime(&now);
	return now.tv_sec;
}

#define TIMESPEC_OVERFLOW(ts) \
#define TIMESPEC_OVERFLOW(ts) \
	((ts)->tv_sec < TIME_MIN || (ts)->tv_sec > TIME_MAX)

static inline void
gethrestime(timestruc_t *now)
{
	struct timespec ts;
	getnstimeofday(&ts);
	now->tv_sec = ts.tv_sec;
	now->tv_nsec = ts.tv_nsec;
}

static inline time_t
gethrestime_sec(void)
{
	struct timespec ts;
	getnstimeofday(&ts);
	return (ts.tv_sec);
}

static inline hrtime_t
gethrtime(void)
{
	struct timespec now;
	getrawmonotonic(&now);
	return (((hrtime_t)now.tv_sec * NSEC_PER_SEC) + now.tv_nsec);
}

#endif /* _SPL_TIME_H */

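After this change gethrtime() and gethrestime() are plain inline wrappers around getrawmonotonic() and getnstimeofday() rather than out-of-line __gethrtime()/__gethrestime() exports. A hedged sketch of how an SPL consumer typically uses them; the function is illustrative, not from the patch:

/* Hypothetical timing sketch: measure an interval with the inline gethrtime(). */
static void
example_measure(void)
{
	hrtime_t start, elapsed_ns;

	start = gethrtime();		/* monotonic, in nanoseconds */
	/* ... do some work ... */
	elapsed_ns = gethrtime() - start;

	printk(KERN_DEBUG "took %lld ns (%lld ms)\n",
	    (long long)elapsed_ns, (long long)NSEC2MSEC(elapsed_ns));
}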
@@ -28,26 +28,17 @@
#include <linux/types.h>
#include <sys/sysmacros.h>

#include <linux/uaccess_compat.h>
#include <linux/file_compat.h>
#include <linux/list_compat.h>
#include <linux/time_compat.h>
#include <linux/bitops_compat.h>
#include <linux/smp_compat.h>
#include <linux/kallsyms_compat.h>
#include <linux/mutex_compat.h>
#include <linux/module_compat.h>
#include <linux/sysctl_compat.h>
#include <linux/proc_compat.h>
#include <linux/math64_compat.h>
#include <linux/zlib_compat.h>
#include <linux/mm_compat.h>
#include <linux/delay.h>
#include <linux/wait_compat.h>

#ifndef HAVE_UINTPTR_T
typedef unsigned long uintptr_t;
#endif
#include <linux/uaccess.h>

#ifndef ULLONG_MAX
#define ULLONG_MAX (~0ULL)

@@ -1,34 +0,0 @@
(standard SPL GPL license header, identical to the one shown above)

#ifndef _SPL_UTSNAME_H
#define _SPL_UTSNAME_H

#include <linux/utsname.h>

extern struct new_utsname *__utsname(void);

#define utsname (*__utsname())

#endif /* SPL_UTSNAME_H */

@@ -33,115 +33,25 @@
#include <sys/types.h>
#include <asm/uaccess.h>

/* These values are loosely coupled with the VM page reclaim.
 * Linux uses its own heuristics to trigger page reclamation, and
 * because those interface are difficult to interface with.  These
 * values should only be considered as a rough guide to the system
 * memory state and not as direct evidence that page reclamation.
 * is or is not currently in progress.
 */
#define membar_producer()		smp_wmb()

#define physmem				totalram_pages
#define freemem				nr_free_pages()
#define availrmem			spl_kmem_availrmem()

extern pgcnt_t minfree;			/* Sum of zone->pages_min */
extern pgcnt_t desfree;			/* Sum of zone->pages_low */
extern pgcnt_t lotsfree;		/* Sum of zone->pages_high */
extern pgcnt_t needfree;		/* Always 0 unused in new Solaris */
extern pgcnt_t swapfs_minfree;		/* Solaris default value */
extern pgcnt_t swapfs_reserve;		/* Solaris default value */
#define membar_producer()		smp_wmb()
#define physmem				totalram_pages
#define freemem				nr_free_pages()

extern vmem_t *heap_arena;		/* primary kernel heap arena */
extern vmem_t *zio_alloc_arena;		/* arena for zio caches */
extern vmem_t *zio_arena;		/* arena for allocating zio memory */

extern pgcnt_t spl_kmem_availrmem(void);
extern size_t vmem_size(vmem_t *vmp, int typemask);

/*
 * The following symbols are available for use within the kernel
 * itself, and they used to be available in older kernels.  But it
 * looks like they have been removed perhaps due to lack of use.
 * For our purposes we need them to access the global memory state
 * of the system, which is even available to user space process
 * in /proc/meminfo.  It's odd to me that there is no kernel API
 * to get the same information, minimally the proc handler for
 * the above mentioned /proc/meminfo file would make use of it.
 */
#define VMEM_ALLOC	0x01
#define VMEM_FREE	0x02

/* Source linux/fs/proc/mmu.c */
#ifndef HAVE_GET_VMALLOC_INFO
#ifdef CONFIG_MMU

#ifndef HAVE_VMALLOC_INFO
struct vmalloc_info {
	unsigned long used;
	unsigned long largest_chunk;
};
#ifndef VMALLOC_TOTAL
#define VMALLOC_TOTAL	(VMALLOC_END - VMALLOC_START)
#endif

typedef void (*get_vmalloc_info_t)(struct vmalloc_info *);
extern get_vmalloc_info_t get_vmalloc_info_fn;

# define VMEM_ALLOC	0x01
# define VMEM_FREE	0x02
# define VMALLOC_TOTAL	(VMALLOC_END - VMALLOC_START)
# define get_vmalloc_info(vmi)	get_vmalloc_info_fn(vmi)
#else
# error "CONFIG_MMU must be defined"
#endif /* CONFIG_MMU */
#endif /* HAVE_GET_VMALLOC_INFO */

#ifdef HAVE_PGDAT_HELPERS
/* Source linux/mm/mmzone.c */
# ifndef HAVE_FIRST_ONLINE_PGDAT
typedef struct pglist_data *(*first_online_pgdat_t)(void);
extern first_online_pgdat_t first_online_pgdat_fn;
# define first_online_pgdat()	first_online_pgdat_fn()
# endif /* HAVE_FIRST_ONLINE_PGDAT */

# ifndef HAVE_NEXT_ONLINE_PGDAT
typedef struct pglist_data *(*next_online_pgdat_t)(struct pglist_data *);
extern next_online_pgdat_t next_online_pgdat_fn;
# define next_online_pgdat(pgd)	next_online_pgdat_fn(pgd)
# endif /* HAVE_NEXT_ONLINE_PGDAT */

# ifndef HAVE_NEXT_ZONE
typedef struct zone *(*next_zone_t)(struct zone *);
extern next_zone_t next_zone_fn;
# define next_zone(zone)	next_zone_fn(zone)
# endif /* HAVE_NEXT_ZONE */

#else /* HAVE_PGDAT_HELPERS */

# ifndef HAVE_PGDAT_LIST
extern struct pglist_data *pgdat_list_addr;
# define pgdat_list	pgdat_list_addr
# endif /* HAVE_PGDAT_LIST */

#endif /* HAVE_PGDAT_HELPERS */

/* Source linux/mm/vmstat.c */
#if defined(NEED_GET_ZONE_COUNTS) && !defined(HAVE_GET_ZONE_COUNTS)
typedef void (*get_zone_counts_t)(unsigned long *, unsigned long *,
    unsigned long *);
extern get_zone_counts_t get_zone_counts_fn;
# define get_zone_counts(a,i,f)	get_zone_counts_fn(a,i,f)
#endif /* NEED_GET_ZONE_COUNTS && !HAVE_GET_ZONE_COUNTS */

typedef enum spl_zone_stat_item {
	SPL_NR_FREE_PAGES,
	SPL_NR_INACTIVE,
	SPL_NR_ACTIVE,
	SPL_NR_ZONE_STAT_ITEMS
} spl_zone_stat_item_t;

extern unsigned long spl_global_page_state(spl_zone_stat_item_t);

#define xcopyin(from, to, size)		copy_from_user(to, from, size)
#define xcopyout(from, to, size)	copy_to_user(to, from, size)
#define xcopyin(from, to, size)		copy_from_user(to, from, size)
#define xcopyout(from, to, size)	copy_to_user(to, from, size)

static __inline__ int
copyin(const void *from, void *to, size_t len)

@@ -102,30 +102,6 @@ The system hostid file
Default value: \fB/etc/hostid\fR.
.RE

.sp
.ne 2
.na
\fBmutex_spin_max\fR (int)
.ad
.RS 12n
Spin a maximum of N times to acquire lock
.sp
.ne 2
.na
\fBPossible values:\fR
.sp
.RS 12n
\fB0\fR Never spin when trying to acquire lock
.sp
\fB-1\fR Spin until acquired or holder yields without dropping lock
.sp
\fB1-MAX_INT\fR Spin for N attempts before sleeping for lock
.RE
.sp
.ne -4
Default value: \fB0\fR.
.RE

.sp
.ne 2
.na

@@ -14,7 +14,6 @@ $(MODULE)-objs += @top_srcdir@/module/spl/spl-taskq.o
$(MODULE)-objs += @top_srcdir@/module/spl/spl-rwlock.o
$(MODULE)-objs += @top_srcdir@/module/spl/spl-vnode.o
$(MODULE)-objs += @top_srcdir@/module/spl/spl-err.o
$(MODULE)-objs += @top_srcdir@/module/spl/spl-time.o
$(MODULE)-objs += @top_srcdir@/module/spl/spl-kobj.o
$(MODULE)-objs += @top_srcdir@/module/spl/spl-generic.o
$(MODULE)-objs += @top_srcdir@/module/spl/spl-atomic.o

@@ -32,11 +32,6 @@

#define DEBUG_SUBSYSTEM S_CRED

#ifdef HAVE_GROUPS_SEARCH
/* Symbol may be exported by custom kernel patch */
#define cr_groups_search(gi, grp)	groups_search(gi, grp)
#else
/* Implementation from 2.6.30 kernel */
static int
#ifdef HAVE_KUIDGID_T
cr_groups_search(const struct group_info *group_info, kgid_t grp)

@@ -66,14 +61,6 @@ cr_groups_search(const struct group_info *group_info, gid_t grp)
}
	return 0;
}
#endif

#ifdef HAVE_CRED_STRUCT

/*
 * As of 2.6.29 a clean credential API appears in the linux kernel.
 * We attempt to layer the Solaris API on top of the linux API.
 */

/* Hold a reference on the credential and group info */
void

@@ -137,87 +124,6 @@ groupmember(gid_t gid, const cred_t *cr)
	return rc;
}

#else /* HAVE_CRED_STRUCT */

/*
 * Until very recently all credential information was embedded in
 * the linux task struct.  For this reason to simulate a Solaris
 * cred_t we need to pass the entire task structure around.
 */

/* Hold a reference on the credential and group info */
void crhold(cred_t *cr) { }

/* Free a reference on the credential and group info */
void crfree(cred_t *cr) { }

/* Return the number of supplemental groups */
int
crgetngroups(const cred_t *cr)
{
	int lock, rc;

	lock = (cr != current);
	if (lock)
		task_lock((struct task_struct *)cr);

	get_group_info(cr->group_info);
	rc = cr->group_info->ngroups;
	put_group_info(cr->group_info);

	if (lock)
		task_unlock((struct task_struct *)cr);

	return rc;
}

/*
 * Return an array of supplemental gids.  The returned address is safe
 * to use as long as the caller has taken a reference with crhold().
 * The caller is responsible for releasing the reference with crfree().
 */
gid_t *
crgetgroups(const cred_t *cr)
{
	gid_t *gids;
	int lock;

	lock = (cr != current);
	if (lock)
		task_lock((struct task_struct *)cr);

	get_group_info(cr->group_info);
	gids = KGID_TO_SGID(cr->group_info->blocks[0]);
	put_group_info(cr->group_info);

	if (lock)
		task_unlock((struct task_struct *)cr);

	return gids;
}

/* Check if the passed gid is available is in supplied credential. */
int
groupmember(gid_t gid, const cred_t *cr)
{
	int lock, rc;

	lock = (cr != current);
	if (lock)
		task_lock((struct task_struct *)cr);

	get_group_info(cr->group_info);
	rc = cr_groups_search(cr->group_info, gid);
	put_group_info(cr->group_info);

	if (lock)
		task_unlock((struct task_struct *)cr);

	return rc;
}

#endif /* HAVE_CRED_STRUCT */

/* Return the effective user id */
uid_t
crgetuid(const cred_t *cr)

|
|
|
@ -37,7 +37,6 @@
|
|||
#include <sys/debug.h>
|
||||
#include <sys/proc.h>
|
||||
#include <sys/kstat.h>
|
||||
#include <sys/utsname.h>
|
||||
#include <sys/file.h>
|
||||
#include <linux/kmod.h>
|
||||
#include <linux/proc_compat.h>
|
||||
|
@ -60,73 +59,6 @@ MODULE_PARM_DESC(spl_hostid, "The system hostid.");
|
|||
proc_t p0 = { 0 };
|
||||
EXPORT_SYMBOL(p0);
|
||||
|
||||
#ifndef HAVE_KALLSYMS_LOOKUP_NAME
|
||||
DECLARE_WAIT_QUEUE_HEAD(spl_kallsyms_lookup_name_waitq);
|
||||
kallsyms_lookup_name_t spl_kallsyms_lookup_name_fn = SYMBOL_POISON;
|
||||
#endif
|
||||
|
||||
int
|
||||
highbit(unsigned long i)
|
||||
{
|
||||
register int h = 1;
|
||||
SENTRY;
|
||||
|
||||
if (i == 0)
|
||||
SRETURN(0);
|
||||
#if BITS_PER_LONG == 64
|
||||
if (i & 0xffffffff00000000ul) {
|
||||
h += 32; i >>= 32;
|
||||
}
|
||||
#endif
|
||||
if (i & 0xffff0000) {
|
||||
h += 16; i >>= 16;
|
||||
}
|
||||
if (i & 0xff00) {
|
||||
h += 8; i >>= 8;
|
||||
}
|
||||
if (i & 0xf0) {
|
||||
h += 4; i >>= 4;
|
||||
}
|
||||
if (i & 0xc) {
|
||||
h += 2; i >>= 2;
|
||||
}
|
||||
if (i & 0x2) {
|
||||
h += 1;
|
||||
}
|
||||
SRETURN(h);
|
||||
}
|
||||
EXPORT_SYMBOL(highbit);
|
||||
|
||||
int
|
||||
highbit64(uint64_t i)
|
||||
{
|
||||
register int h = 1;
|
||||
SENTRY;
|
||||
|
||||
if (i == 0)
|
||||
SRETURN(0);
|
||||
if (i & 0xffffffff00000000ull) {
|
||||
h += 32; i >>= 32;
|
||||
}
|
||||
if (i & 0xffff0000) {
|
||||
h += 16; i >>= 16;
|
||||
}
|
||||
if (i & 0xff00) {
|
||||
h += 8; i >>= 8;
|
||||
}
|
||||
if (i & 0xf0) {
|
||||
h += 4; i >>= 4;
|
||||
}
|
||||
if (i & 0xc) {
|
||||
h += 2; i >>= 2;
|
||||
}
|
||||
if (i & 0x2) {
|
||||
h += 1;
|
||||
}
|
||||
SRETURN(h);
|
||||
}
|
||||
EXPORT_SYMBOL(highbit64);
|
||||
|
||||
#if BITS_PER_LONG == 32
|
||||
/*
|
||||
* Support 64/64 => 64 division on a 32-bit platform. While the kernel
|
||||
|
@ -438,17 +370,6 @@ __put_task_struct(struct task_struct *t)
|
|||
EXPORT_SYMBOL(__put_task_struct);
|
||||
#endif /* HAVE_PUT_TASK_STRUCT */
|
||||
|
||||
struct new_utsname *__utsname(void)
|
||||
{
|
||||
#ifdef HAVE_INIT_UTSNAME
|
||||
return init_utsname();
|
||||
#else
|
||||
return &system_utsname;
|
||||
#endif
|
||||
}
|
||||
EXPORT_SYMBOL(__utsname);
|
||||
|
||||
|
||||
/*
|
||||
* Read the unique system identifier from the /etc/hostid file.
|
||||
*
|
||||
|
@@ -564,63 +485,6 @@ zone_get_hostid(void *zone)
}
EXPORT_SYMBOL(zone_get_hostid);

#ifndef HAVE_KALLSYMS_LOOKUP_NAME
/*
 * The kallsyms_lookup_name() kernel function is not an exported symbol in
 * Linux 2.6.19 through 2.6.32 inclusive.
 *
 * This function replaces the functionality by performing an upcall to user
 * space where /proc/kallsyms is consulted for the requested address.
 *
 */

#define GET_KALLSYMS_ADDR_CMD \
	"exec 0</dev/null " \
	" 1>/proc/sys/kernel/spl/kallsyms_lookup_name " \
	" 2>/dev/null; " \
	"awk '{ if ( $3 == \"kallsyms_lookup_name\" ) { print $1 } }' " \
	" /proc/kallsyms "

static int
set_kallsyms_lookup_name(void)
{
	char *argv[] = { "/bin/sh",
			 "-c",
			 GET_KALLSYMS_ADDR_CMD,
			 NULL };
	char *envp[] = { "HOME=/",
			 "TERM=linux",
			 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 NULL };
	int rc;

	rc = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);

	/*
	 * Due to I/O buffering the helper may return successfully before
	 * the proc handler has a chance to execute. To catch this case
	 * wait up to 1 second to verify spl_kallsyms_lookup_name_fn was
	 * updated to a non SYMBOL_POISON value.
	 */
	if (rc == 0) {
		rc = wait_event_timeout(spl_kallsyms_lookup_name_waitq,
			spl_kallsyms_lookup_name_fn != SYMBOL_POISON, HZ);
		if (rc == 0)
			rc = -ETIMEDOUT;
		else if (spl_kallsyms_lookup_name_fn == SYMBOL_POISON)
			rc = -EFAULT;
		else
			rc = 0;
	}

	if (rc)
		printk("SPL: Failed user helper '%s %s %s', rc = %d\n",
		       argv[0], argv[1], argv[2], rc);

	return rc;
}
#endif
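Once the helper has written the address back through the proc handler, the poisoned function pointer behaves like the real kallsyms_lookup_name(). A small illustrative sketch of a consumer (the symbol name is taken from the lookups later in this diff; the surrounding fragment is an assumption, not SPL code):

	/* Resolve a symbol that 2.6.19-2.6.32 kernels do not export. */
	get_vmalloc_info_t fn;

	fn = (get_vmalloc_info_t)spl_kallsyms_lookup_name("get_vmalloc_info");
	if (fn == NULL)
		return (-EFAULT);	/* symbol not found in /proc/kallsyms */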

static int
__init spl_init(void)
{

@@ -656,19 +520,10 @@ __init spl_init(void)
	if ((rc = spl_zlib_init()))
		SGOTO(out9, rc);

#ifndef HAVE_KALLSYMS_LOOKUP_NAME
	if ((rc = set_kallsyms_lookup_name()))
		SGOTO(out10, rc = -EADDRNOTAVAIL);
#endif /* HAVE_KALLSYMS_LOOKUP_NAME */

	if ((rc = spl_kmem_init_kallsyms_lookup()))
		SGOTO(out10, rc);

	printk(KERN_NOTICE "SPL: Loaded module v%s-%s%s\n", SPL_META_VERSION,
	       SPL_META_RELEASE, SPL_DEBUG_STR);
	SRETURN(rc);
out10:
	spl_zlib_fini();

out9:
	spl_tsd_fini();
out8:

@@ -740,7 +595,7 @@ EXPORT_SYMBOL(spl_cleanup);
module_init(spl_init);
module_exit(spl_fini);

MODULE_AUTHOR("Lawrence Livermore National Labs");
MODULE_DESCRIPTION("Solaris Porting Layer");
MODULE_LICENSE("GPL");
MODULE_AUTHOR(SPL_META_AUTHOR);
MODULE_LICENSE(SPL_META_LICENSE);
MODULE_VERSION(SPL_META_VERSION "-" SPL_META_RELEASE);
@@ -101,44 +101,6 @@ module_param(spl_kmem_cache_kmem_limit, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_kmem_limit,
    "Objects less than N bytes use the kmalloc");

/*
 * The minimum amount of memory measured in pages to be free at all
 * times on the system. This is similar to Linux's zone->pages_min
 * multiplied by the number of zones and is sized based on that.
 */
pgcnt_t minfree = 0;
EXPORT_SYMBOL(minfree);

/*
 * The desired amount of memory measured in pages to be free at all
 * times on the system. This is similar to Linux's zone->pages_low
 * multiplied by the number of zones and is sized based on that.
 * Assuming all zones are being used roughly equally, when we drop
 * below this threshold asynchronous page reclamation is triggered.
 */
pgcnt_t desfree = 0;
EXPORT_SYMBOL(desfree);

/*
 * When above this amount of memory measures in pages the system is
 * determined to have enough free memory. This is similar to Linux's
 * zone->pages_high multiplied by the number of zones and is sized based
 * on that. Assuming all zones are being used roughly equally, when
 * asynchronous page reclamation reaches this threshold it stops.
 */
pgcnt_t lotsfree = 0;
EXPORT_SYMBOL(lotsfree);

/* Unused always 0 in this implementation */
pgcnt_t needfree = 0;
EXPORT_SYMBOL(needfree);

pgcnt_t swapfs_minfree = 0;
EXPORT_SYMBOL(swapfs_minfree);

pgcnt_t swapfs_reserve = 0;
EXPORT_SYMBOL(swapfs_reserve);

vmem_t *heap_arena = NULL;
EXPORT_SYMBOL(heap_arena);
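A hypothetical consumer of these tunables (not part of this diff) makes the policy in the comments concrete: reclamation starts when free memory drops below the desfree/lotsfree targets and becomes urgent below minfree. All values are in pages; the lotsfree + needfree + desfree comparison mirrors the usual Solaris/ZFS heuristic and is an assumption here, not SPL code:

	static int
	spl_memory_pressure_sketch(void)
	{
		if (freemem < minfree)
			return (2);	/* critically low, reclaim now */
		if (freemem < lotsfree + needfree + desfree)
			return (1);	/* below target, reclaim asynchronously */
		return (0);		/* enough memory is free */
	}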
@ -148,142 +110,14 @@ EXPORT_SYMBOL(zio_alloc_arena);
|
|||
vmem_t *zio_arena = NULL;
|
||||
EXPORT_SYMBOL(zio_arena);
|
||||
|
||||
#ifndef HAVE_GET_VMALLOC_INFO
|
||||
get_vmalloc_info_t get_vmalloc_info_fn = SYMBOL_POISON;
|
||||
EXPORT_SYMBOL(get_vmalloc_info_fn);
|
||||
#endif /* HAVE_GET_VMALLOC_INFO */
|
||||
|
||||
#ifdef HAVE_PGDAT_HELPERS
|
||||
# ifndef HAVE_FIRST_ONLINE_PGDAT
|
||||
first_online_pgdat_t first_online_pgdat_fn = SYMBOL_POISON;
|
||||
EXPORT_SYMBOL(first_online_pgdat_fn);
|
||||
# endif /* HAVE_FIRST_ONLINE_PGDAT */
|
||||
|
||||
# ifndef HAVE_NEXT_ONLINE_PGDAT
|
||||
next_online_pgdat_t next_online_pgdat_fn = SYMBOL_POISON;
|
||||
EXPORT_SYMBOL(next_online_pgdat_fn);
|
||||
# endif /* HAVE_NEXT_ONLINE_PGDAT */
|
||||
|
||||
# ifndef HAVE_NEXT_ZONE
|
||||
next_zone_t next_zone_fn = SYMBOL_POISON;
|
||||
EXPORT_SYMBOL(next_zone_fn);
|
||||
# endif /* HAVE_NEXT_ZONE */
|
||||
|
||||
#else /* HAVE_PGDAT_HELPERS */
|
||||
|
||||
# ifndef HAVE_PGDAT_LIST
|
||||
struct pglist_data *pgdat_list_addr = SYMBOL_POISON;
|
||||
EXPORT_SYMBOL(pgdat_list_addr);
|
||||
# endif /* HAVE_PGDAT_LIST */
|
||||
|
||||
#endif /* HAVE_PGDAT_HELPERS */
|
||||
|
||||
#ifdef NEED_GET_ZONE_COUNTS
|
||||
# ifndef HAVE_GET_ZONE_COUNTS
|
||||
get_zone_counts_t get_zone_counts_fn = SYMBOL_POISON;
|
||||
EXPORT_SYMBOL(get_zone_counts_fn);
|
||||
# endif /* HAVE_GET_ZONE_COUNTS */
|
||||
|
||||
unsigned long
|
||||
spl_global_page_state(spl_zone_stat_item_t item)
|
||||
{
|
||||
unsigned long active;
|
||||
unsigned long inactive;
|
||||
unsigned long free;
|
||||
|
||||
get_zone_counts(&active, &inactive, &free);
|
||||
switch (item) {
|
||||
case SPL_NR_FREE_PAGES: return free;
|
||||
case SPL_NR_INACTIVE: return inactive;
|
||||
case SPL_NR_ACTIVE: return active;
|
||||
default: ASSERT(0); /* Unsupported */
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
# ifdef HAVE_GLOBAL_PAGE_STATE
|
||||
unsigned long
|
||||
spl_global_page_state(spl_zone_stat_item_t item)
|
||||
{
|
||||
unsigned long pages = 0;
|
||||
|
||||
switch (item) {
|
||||
case SPL_NR_FREE_PAGES:
|
||||
# ifdef HAVE_ZONE_STAT_ITEM_NR_FREE_PAGES
|
||||
pages += global_page_state(NR_FREE_PAGES);
|
||||
# endif
|
||||
break;
|
||||
case SPL_NR_INACTIVE:
|
||||
# ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE
|
||||
pages += global_page_state(NR_INACTIVE);
|
||||
# endif
|
||||
# ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE_ANON
|
||||
pages += global_page_state(NR_INACTIVE_ANON);
|
||||
# endif
|
||||
# ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE_FILE
|
||||
pages += global_page_state(NR_INACTIVE_FILE);
|
||||
# endif
|
||||
break;
|
||||
case SPL_NR_ACTIVE:
|
||||
# ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE
|
||||
pages += global_page_state(NR_ACTIVE);
|
||||
# endif
|
||||
# ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE_ANON
|
||||
pages += global_page_state(NR_ACTIVE_ANON);
|
||||
# endif
|
||||
# ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE_FILE
|
||||
pages += global_page_state(NR_ACTIVE_FILE);
|
||||
# endif
|
||||
break;
|
||||
default:
|
||||
ASSERT(0); /* Unsupported */
|
||||
}
|
||||
|
||||
return pages;
|
||||
}
|
||||
# else
|
||||
# error "Both global_page_state() and get_zone_counts() unavailable"
|
||||
# endif /* HAVE_GLOBAL_PAGE_STATE */
|
||||
#endif /* NEED_GET_ZONE_COUNTS */
|
||||
EXPORT_SYMBOL(spl_global_page_state);
|
||||
|
||||
#ifndef HAVE_SHRINK_DCACHE_MEMORY
|
||||
shrink_dcache_memory_t shrink_dcache_memory_fn = SYMBOL_POISON;
|
||||
EXPORT_SYMBOL(shrink_dcache_memory_fn);
|
||||
#endif /* HAVE_SHRINK_DCACHE_MEMORY */
|
||||
|
||||
#ifndef HAVE_SHRINK_ICACHE_MEMORY
|
||||
shrink_icache_memory_t shrink_icache_memory_fn = SYMBOL_POISON;
|
||||
EXPORT_SYMBOL(shrink_icache_memory_fn);
|
||||
#endif /* HAVE_SHRINK_ICACHE_MEMORY */
|
||||
|
||||
pgcnt_t
spl_kmem_availrmem(void)
{
	/* The amount of easily available memory */
	return (spl_global_page_state(SPL_NR_FREE_PAGES) +
	    spl_global_page_state(SPL_NR_INACTIVE));
}
EXPORT_SYMBOL(spl_kmem_availrmem);

size_t
vmem_size(vmem_t *vmp, int typemask)
{
	struct vmalloc_info vmi;
	size_t size = 0;
	ASSERT3P(vmp, ==, NULL);
	ASSERT3S(typemask & VMEM_ALLOC, ==, VMEM_ALLOC);
	ASSERT3S(typemask & VMEM_FREE, ==, VMEM_FREE);

	ASSERT(vmp == NULL);
	ASSERT(typemask & (VMEM_ALLOC | VMEM_FREE));

	get_vmalloc_info(&vmi);
	if (typemask & VMEM_ALLOC)
		size += (size_t)vmi.used;

	if (typemask & VMEM_FREE)
		size += (size_t)(VMALLOC_TOTAL - vmi.used);

	return size;
	return (VMALLOC_TOTAL);
}
EXPORT_SYMBOL(vmem_size);
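With both type flags set the old implementation returned used + (VMALLOC_TOTAL - used), i.e. VMALLOC_TOTAL, which is why the simplified version can return the constant directly. A usage sketch, illustrative only:

	/* Size of the vmalloc arena, in bytes, Solaris vmem_size() style. */
	size_t total = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE);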
|
||||
|
||||
|
@@ -294,29 +128,6 @@ kmem_debugging(void)
}
EXPORT_SYMBOL(kmem_debugging);

#ifndef HAVE_KVASPRINTF
/* Simplified asprintf. */
char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = kmalloc(len+1, gfp);
	if (!p)
		return NULL;

	vsnprintf(p, len+1, fmt, ap);

	return p;
}
EXPORT_SYMBOL(kvasprintf);
#endif /* HAVE_KVASPRINTF */
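A minimal caller sketch for the shim above (illustrative; the wrapper name is made up, but this is essentially what kmem_vasprintf()/kmem_asprintf() build on):

	static char *
	example_asprintf(const char *fmt, ...)
	{
		va_list ap;
		char *p;

		va_start(ap, fmt);
		p = kvasprintf(GFP_KERNEL, fmt, ap);	/* NULL on allocation failure */
		va_end(ap);

		return (p);
	}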
|
||||
|
||||
char *
|
||||
kmem_vasprintf(const char *fmt, va_list ap)
|
||||
{
|
||||
|
@ -977,7 +788,7 @@ spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
|
|||
static inline uint32_t
|
||||
spl_offslab_size(spl_kmem_cache_t *skc)
|
||||
{
|
||||
return 1UL << (highbit(spl_obj_size(skc)) + 1);
|
||||
return 1UL << (fls64(spl_obj_size(skc)) + 1);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1391,7 +1202,7 @@ spl_cache_age(void *data)
|
|||
atomic_inc(&skc->skc_ref);
|
||||
|
||||
if (!(skc->skc_flags & KMC_NOMAGAZINE))
|
||||
spl_on_each_cpu(spl_magazine_age, skc, 1);
|
||||
on_each_cpu(spl_magazine_age, skc, 1);
|
||||
|
||||
spl_slab_reclaim(skc, skc->skc_reap, 0);
|
||||
|
||||
|
@@ -2276,7 +2087,7 @@ __spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
	if ((spl_kmem_cache_reclaim & KMC_RECLAIM_ONCE) && sc->nr_to_scan)
		return (-1);

	return MAX((alloc * sysctl_vfs_cache_pressure) / 100, 0);
	return (MAX(alloc, 0));
}

SPL_SHRINKER_CALLBACK_WRAPPER(spl_kmem_cache_generic_shrinker);
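As a worked example of the change: with alloc = 1000 reclaimable objects the old expression reported (1000 * sysctl_vfs_cache_pressure) / 100, i.e. 1000 at the default pressure of 100, 2000 at 200 and 500 at 50, while the simplified version always reports the raw count of 1000.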
|
||||
|
@ -2472,114 +2283,6 @@ spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock)
|
|||
#define spl_kmem_fini_tracking(list, lock)
|
||||
#endif /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
|
||||
|
||||
static void
|
||||
spl_kmem_init_globals(void)
|
||||
{
|
||||
struct zone *zone;
|
||||
|
||||
/* For now all zones are includes, it may be wise to restrict
|
||||
* this to normal and highmem zones if we see problems. */
|
||||
for_each_zone(zone) {
|
||||
|
||||
if (!populated_zone(zone))
|
||||
continue;
|
||||
|
||||
minfree += min_wmark_pages(zone);
|
||||
desfree += low_wmark_pages(zone);
|
||||
lotsfree += high_wmark_pages(zone);
|
||||
}
|
||||
|
||||
/* Solaris default values */
|
||||
swapfs_minfree = MAX(2*1024*1024 >> PAGE_SHIFT, physmem >> 3);
|
||||
swapfs_reserve = MIN(4*1024*1024 >> PAGE_SHIFT, physmem >> 4);
|
||||
}
|
||||
|
||||
/*
|
||||
* Called at module init when it is safe to use spl_kallsyms_lookup_name()
|
||||
*/
|
||||
int
|
||||
spl_kmem_init_kallsyms_lookup(void)
|
||||
{
|
||||
#ifndef HAVE_GET_VMALLOC_INFO
|
||||
get_vmalloc_info_fn = (get_vmalloc_info_t)
|
||||
spl_kallsyms_lookup_name("get_vmalloc_info");
|
||||
if (!get_vmalloc_info_fn) {
|
||||
printk(KERN_ERR "Error: Unknown symbol get_vmalloc_info\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
#endif /* HAVE_GET_VMALLOC_INFO */
|
||||
|
||||
#ifdef HAVE_PGDAT_HELPERS
|
||||
# ifndef HAVE_FIRST_ONLINE_PGDAT
|
||||
first_online_pgdat_fn = (first_online_pgdat_t)
|
||||
spl_kallsyms_lookup_name("first_online_pgdat");
|
||||
if (!first_online_pgdat_fn) {
|
||||
printk(KERN_ERR "Error: Unknown symbol first_online_pgdat\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
# endif /* HAVE_FIRST_ONLINE_PGDAT */
|
||||
|
||||
# ifndef HAVE_NEXT_ONLINE_PGDAT
|
||||
next_online_pgdat_fn = (next_online_pgdat_t)
|
||||
spl_kallsyms_lookup_name("next_online_pgdat");
|
||||
if (!next_online_pgdat_fn) {
|
||||
printk(KERN_ERR "Error: Unknown symbol next_online_pgdat\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
# endif /* HAVE_NEXT_ONLINE_PGDAT */
|
||||
|
||||
# ifndef HAVE_NEXT_ZONE
|
||||
next_zone_fn = (next_zone_t)
|
||||
spl_kallsyms_lookup_name("next_zone");
|
||||
if (!next_zone_fn) {
|
||||
printk(KERN_ERR "Error: Unknown symbol next_zone\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
# endif /* HAVE_NEXT_ZONE */
|
||||
|
||||
#else /* HAVE_PGDAT_HELPERS */
|
||||
|
||||
# ifndef HAVE_PGDAT_LIST
|
||||
pgdat_list_addr = *(struct pglist_data **)
|
||||
spl_kallsyms_lookup_name("pgdat_list");
|
||||
if (!pgdat_list_addr) {
|
||||
printk(KERN_ERR "Error: Unknown symbol pgdat_list\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
# endif /* HAVE_PGDAT_LIST */
|
||||
#endif /* HAVE_PGDAT_HELPERS */
|
||||
|
||||
#if defined(NEED_GET_ZONE_COUNTS) && !defined(HAVE_GET_ZONE_COUNTS)
|
||||
get_zone_counts_fn = (get_zone_counts_t)
|
||||
spl_kallsyms_lookup_name("get_zone_counts");
|
||||
if (!get_zone_counts_fn) {
|
||||
printk(KERN_ERR "Error: Unknown symbol get_zone_counts\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
#endif /* NEED_GET_ZONE_COUNTS && !HAVE_GET_ZONE_COUNTS */
|
||||
|
||||
/*
|
||||
* It is now safe to initialize the global tunings which rely on
|
||||
* the use of the for_each_zone() macro. This macro in turns
|
||||
* depends on the *_pgdat symbols which are now available.
|
||||
*/
|
||||
spl_kmem_init_globals();
|
||||
|
||||
#ifndef HAVE_SHRINK_DCACHE_MEMORY
|
||||
/* When shrink_dcache_memory_fn == NULL support is disabled */
|
||||
shrink_dcache_memory_fn = (shrink_dcache_memory_t)
|
||||
spl_kallsyms_lookup_name("shrink_dcache_memory");
|
||||
#endif /* HAVE_SHRINK_DCACHE_MEMORY */
|
||||
|
||||
#ifndef HAVE_SHRINK_ICACHE_MEMORY
|
||||
/* When shrink_icache_memory_fn == NULL support is disabled */
|
||||
shrink_icache_memory_fn = (shrink_icache_memory_t)
|
||||
spl_kallsyms_lookup_name("shrink_icache_memory");
|
||||
#endif /* HAVE_SHRINK_ICACHE_MEMORY */
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
spl_kmem_init(void)
|
||||
{
|
||||
|
|
|
@@ -32,46 +32,5 @@

#define DEBUG_SUBSYSTEM S_MUTEX

/*
 * While a standard mutex implementation has been available in the kernel
 * for quite some time. It was not until 2.6.29 and latter kernels that
 * adaptive mutexs were embraced and integrated with the scheduler. This
 * brought a significant performance improvement, but just as importantly
 * it added a lock owner to the generic mutex outside CONFIG_DEBUG_MUTEXES
 * builds. This is critical for correctly supporting the mutex_owner()
 * Solaris primitive. When the owner is available we use a pure Linux
 * mutex implementation. When the owner is not available we still use
 * Linux mutexs as a base but also reserve space for an owner field right
 * after the mutex structure.
 *
 * In the case when HAVE_MUTEX_OWNER is not defined your code may
 * still me able to leverage adaptive mutexs. As long as the task_curr()
 * symbol is exported this code will provide a poor mans adaptive mutex
 * implementation. However, this is not required and if the symbol is
 * unavailable we provide a standard mutex.
 */

#if !defined(HAVE_MUTEX_OWNER) || !defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
#ifdef HAVE_TASK_CURR
/*
 * mutex_spin_max = { 0, -1, 1-MAX_INT }
 *  0:         Never spin when trying to acquire lock
 * -1:         Spin until acquired or holder yields without dropping lock
 *  1-MAX_INT: Spin for N attempts before sleeping for lock
 */
int mutex_spin_max = 0;
module_param(mutex_spin_max, int, 0644);
MODULE_PARM_DESC(mutex_spin_max, "Spin a maximum of N times to acquire lock");

int
spl_mutex_spin_max(void)
{
	return mutex_spin_max;
}
EXPORT_SYMBOL(spl_mutex_spin_max);

#endif /* HAVE_TASK_CURR */
#endif /* !HAVE_MUTEX_OWNER */

int spl_mutex_init(void) { return 0; }
void spl_mutex_fini(void) { }
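The "poor man's adaptive mutex" described in the comment above can be sketched as follows. This is illustrative only, not the removed SPL code: the structure and owner field are assumptions standing in for the real kmutex_t bookkeeping, while mutex_trylock(), mutex_lock(), task_curr() and current are the stock kernel facilities it leans on:

	struct adaptive_mutex_sketch {
		struct mutex		am_mutex;
		struct task_struct	*am_owner;
	};

	static void
	adaptive_mutex_enter_sketch(struct adaptive_mutex_sketch *am)
	{
		int i;

		for (i = 0; mutex_spin_max < 0 || i < mutex_spin_max; i++) {
			if (mutex_trylock(&am->am_mutex)) {
				am->am_owner = current;	/* got it while spinning */
				return;
			}

			/* Stop spinning once the holder is no longer on a CPU */
			if (am->am_owner == NULL || !task_curr(am->am_owner))
				break;
		}

		mutex_lock(&am->am_mutex);	/* sleep until acquired */
		am->am_owner = current;
	}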
|
||||
|
|
|
@ -49,10 +49,7 @@ static unsigned long table_min = 0;
|
|||
static unsigned long table_max = ~0;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SYSCTL
|
||||
static struct ctl_table_header *spl_header = NULL;
|
||||
#endif /* CONFIG_SYSCTL */
|
||||
|
||||
static struct proc_dir_entry *proc_spl = NULL;
|
||||
#ifdef DEBUG_KMEM
|
||||
static struct proc_dir_entry *proc_spl_kmem = NULL;
|
||||
|
@ -60,121 +57,6 @@ static struct proc_dir_entry *proc_spl_kmem_slab = NULL;
|
|||
#endif /* DEBUG_KMEM */
|
||||
struct proc_dir_entry *proc_spl_kstat = NULL;
|
||||
|
||||
#ifdef HAVE_CTL_NAME
|
||||
#ifdef HAVE_CTL_UNNUMBERED
|
||||
|
||||
#define CTL_SPL CTL_UNNUMBERED
|
||||
#define CTL_SPL_DEBUG CTL_UNNUMBERED
|
||||
#define CTL_SPL_VM CTL_UNNUMBERED
|
||||
#define CTL_SPL_MUTEX CTL_UNNUMBERED
|
||||
#define CTL_SPL_KMEM CTL_UNNUMBERED
|
||||
#define CTL_SPL_KSTAT CTL_UNNUMBERED
|
||||
|
||||
#define CTL_VERSION CTL_UNNUMBERED /* Version */
|
||||
#define CTL_HOSTID CTL_UNNUMBERED /* Host id by /usr/bin/hostid */
|
||||
#define CTL_KALLSYMS CTL_UNNUMBERED /* kallsyms_lookup_name addr */
|
||||
|
||||
#define CTL_DEBUG_SUBSYS CTL_UNNUMBERED /* Debug subsystem */
|
||||
#define CTL_DEBUG_MASK CTL_UNNUMBERED /* Debug mask */
|
||||
#define CTL_DEBUG_PRINTK CTL_UNNUMBERED /* All messages to console */
|
||||
#define CTL_DEBUG_MB CTL_UNNUMBERED /* Debug buffer size */
|
||||
#define CTL_DEBUG_BINARY CTL_UNNUMBERED /* Binary data in buffer */
|
||||
#define CTL_DEBUG_CATASTROPHE CTL_UNNUMBERED /* Set if BUG'd or panic'd */
|
||||
#define CTL_DEBUG_PANIC_ON_BUG CTL_UNNUMBERED /* Should panic on BUG */
|
||||
#define CTL_DEBUG_PATH CTL_UNNUMBERED /* Dump log location */
|
||||
#define CTL_DEBUG_DUMP CTL_UNNUMBERED /* Dump debug buffer to file */
|
||||
#define CTL_DEBUG_FORCE_BUG CTL_UNNUMBERED /* Hook to force a BUG */
|
||||
#define CTL_DEBUG_STACK_SIZE CTL_UNNUMBERED /* Max observed stack size */
|
||||
|
||||
#define CTL_CONSOLE_RATELIMIT CTL_UNNUMBERED /* Ratelimit console messages */
|
||||
#define CTL_CONSOLE_MAX_DELAY_CS CTL_UNNUMBERED /* Max delay skip messages */
|
||||
#define CTL_CONSOLE_MIN_DELAY_CS CTL_UNNUMBERED /* Init delay skip messages */
|
||||
#define CTL_CONSOLE_BACKOFF CTL_UNNUMBERED /* Delay increase factor */
|
||||
|
||||
#define CTL_VM_MINFREE CTL_UNNUMBERED /* Minimum free memory */
|
||||
#define CTL_VM_DESFREE CTL_UNNUMBERED /* Desired free memory */
|
||||
#define CTL_VM_LOTSFREE CTL_UNNUMBERED /* Lots of free memory */
|
||||
#define CTL_VM_NEEDFREE CTL_UNNUMBERED /* Need free memory */
|
||||
#define CTL_VM_SWAPFS_MINFREE CTL_UNNUMBERED /* Minimum swapfs memory */
|
||||
#define CTL_VM_SWAPFS_RESERVE CTL_UNNUMBERED /* Reserved swapfs memory */
|
||||
#define CTL_VM_AVAILRMEM CTL_UNNUMBERED /* Easily available memory */
|
||||
#define CTL_VM_FREEMEM CTL_UNNUMBERED /* Free memory */
|
||||
#define CTL_VM_PHYSMEM CTL_UNNUMBERED /* Total physical memory */
|
||||
|
||||
#ifdef DEBUG_KMEM
|
||||
#define CTL_KMEM_KMEMUSED CTL_UNNUMBERED /* Alloc'd kmem bytes */
|
||||
#define CTL_KMEM_KMEMMAX CTL_UNNUMBERED /* Max alloc'd by kmem bytes */
|
||||
#define CTL_KMEM_VMEMUSED CTL_UNNUMBERED /* Alloc'd vmem bytes */
|
||||
#define CTL_KMEM_VMEMMAX CTL_UNNUMBERED /* Max alloc'd by vmem bytes */
|
||||
#define CTL_KMEM_SLAB_KMEMTOTAL CTL_UNNUMBERED /* Total kmem slab size */
|
||||
#define CTL_KMEM_SLAB_KMEMALLOC CTL_UNNUMBERED /* Alloc'd kmem slab size */
|
||||
#define CTL_KMEM_SLAB_KMEMMAX CTL_UNNUMBERED /* Max kmem slab size */
|
||||
#define CTL_KMEM_SLAB_VMEMTOTAL CTL_UNNUMBERED /* Total vmem slab size */
|
||||
#define CTL_KMEM_SLAB_VMEMALLOC CTL_UNNUMBERED /* Alloc'd vmem slab size */
|
||||
#define CTL_KMEM_SLAB_VMEMMAX CTL_UNNUMBERED /* Max vmem slab size */
|
||||
#endif
|
||||
|
||||
#else /* HAVE_CTL_UNNUMBERED */
|
||||
|
||||
enum {
|
||||
CTL_SPL = 0x87,
|
||||
CTL_SPL_DEBUG = 0x88,
|
||||
CTL_SPL_VM = 0x89,
|
||||
CTL_SPL_MUTEX = 0x90,
|
||||
CTL_SPL_KMEM = 0x91,
|
||||
CTL_SPL_KSTAT = 0x92,
|
||||
};
|
||||
|
||||
enum {
|
||||
CTL_VERSION = 1, /* Version */
|
||||
CTL_HOSTID, /* Host id reported by /usr/bin/hostid */
|
||||
CTL_KALLSYMS, /* Address of kallsyms_lookup_name */
|
||||
|
||||
#ifdef DEBUG_LOG
|
||||
CTL_DEBUG_SUBSYS, /* Debug subsystem */
|
||||
CTL_DEBUG_MASK, /* Debug mask */
|
||||
CTL_DEBUG_PRINTK, /* Force all messages to console */
|
||||
CTL_DEBUG_MB, /* Debug buffer size */
|
||||
CTL_DEBUG_BINARY, /* Include binary data in buffer */
|
||||
CTL_DEBUG_CATASTROPHE, /* Set if we have BUG'd or panic'd */
|
||||
CTL_DEBUG_PANIC_ON_BUG, /* Set if we should panic on BUG */
|
||||
CTL_DEBUG_PATH, /* Dump log location */
|
||||
CTL_DEBUG_DUMP, /* Dump debug buffer to file */
|
||||
CTL_DEBUG_FORCE_BUG, /* Hook to force a BUG */
|
||||
CTL_DEBUG_STACK_SIZE, /* Max observed stack size */
|
||||
#endif
|
||||
|
||||
CTL_CONSOLE_RATELIMIT, /* Ratelimit console messages */
|
||||
CTL_CONSOLE_MAX_DELAY_CS, /* Max delay which we skip messages */
|
||||
CTL_CONSOLE_MIN_DELAY_CS, /* Init delay which we skip messages */
|
||||
CTL_CONSOLE_BACKOFF, /* Delay increase factor */
|
||||
|
||||
CTL_VM_MINFREE, /* Minimum free memory threshold */
|
||||
CTL_VM_DESFREE, /* Desired free memory threshold */
|
||||
CTL_VM_LOTSFREE, /* Lots of free memory threshold */
|
||||
CTL_VM_NEEDFREE, /* Need free memory deficit */
|
||||
CTL_VM_SWAPFS_MINFREE, /* Minimum swapfs memory */
|
||||
CTL_VM_SWAPFS_RESERVE, /* Reserved swapfs memory */
|
||||
CTL_VM_AVAILRMEM, /* Easily available memory */
|
||||
CTL_VM_FREEMEM, /* Free memory */
|
||||
CTL_VM_PHYSMEM, /* Total physical memory */
|
||||
|
||||
#ifdef DEBUG_KMEM
|
||||
CTL_KMEM_KMEMUSED, /* Alloc'd kmem bytes */
|
||||
CTL_KMEM_KMEMMAX, /* Max alloc'd by kmem bytes */
|
||||
CTL_KMEM_VMEMUSED, /* Alloc'd vmem bytes */
|
||||
CTL_KMEM_VMEMMAX, /* Max alloc'd by vmem bytes */
|
||||
CTL_KMEM_SLAB_KMEMTOTAL, /* Total kmem slab size */
|
||||
CTL_KMEM_SLAB_KMEMALLOC, /* Alloc'd kmem slab size */
|
||||
CTL_KMEM_SLAB_KMEMMAX, /* Max kmem slab size */
|
||||
CTL_KMEM_SLAB_VMEMTOTAL, /* Total vmem slab size */
|
||||
CTL_KMEM_SLAB_VMEMALLOC, /* Alloc'd vmem slab size */
|
||||
CTL_KMEM_SLAB_VMEMMAX, /* Max vmem slab size */
|
||||
#endif
|
||||
};
|
||||
#endif /* HAVE_CTL_UNNUMBERED */
|
||||
#endif /* HAVE_CTL_NAME */
|
||||
|
||||
static int
|
||||
proc_copyin_string(char *kbuffer, int kbuffer_size,
|
||||
const char *ubuffer, int ubuffer_size)
|
||||
|
@ -229,7 +111,9 @@ proc_copyout_string(char *ubuffer, int ubuffer_size,
|
|||
}
|
||||
|
||||
#ifdef DEBUG_LOG
|
||||
SPL_PROC_HANDLER(proc_dobitmasks)
|
||||
static int
|
||||
proc_dobitmasks(struct ctl_table *table, int write,
|
||||
void __user *buffer, size_t *lenp, loff_t *ppos)
|
||||
{
|
||||
unsigned long *mask = table->data;
|
||||
int is_subsys = (mask == &spl_debug_subsys) ? 1 : 0;
|
||||
|
@ -270,7 +154,9 @@ SPL_PROC_HANDLER(proc_dobitmasks)
|
|||
SRETURN(rc);
|
||||
}
|
||||
|
||||
SPL_PROC_HANDLER(proc_debug_mb)
|
||||
static int
|
||||
proc_debug_mb(struct ctl_table *table, int write,
|
||||
void __user *buffer, size_t *lenp, loff_t *ppos)
|
||||
{
|
||||
char str[32];
|
||||
int rc, len;
|
||||
|
@ -299,7 +185,9 @@ SPL_PROC_HANDLER(proc_debug_mb)
|
|||
SRETURN(rc);
|
||||
}
|
||||
|
||||
SPL_PROC_HANDLER(proc_dump_kernel)
|
||||
static int
|
||||
proc_dump_kernel(struct ctl_table *table, int write,
|
||||
void __user *buffer, size_t *lenp, loff_t *ppos)
|
||||
{
|
||||
SENTRY;
|
||||
|
||||
|
@ -313,7 +201,9 @@ SPL_PROC_HANDLER(proc_dump_kernel)
|
|||
SRETURN(0);
|
||||
}
|
||||
|
||||
SPL_PROC_HANDLER(proc_force_bug)
|
||||
static int
|
||||
proc_force_bug(struct ctl_table *table, int write,
|
||||
void __user *buffer, size_t *lenp, loff_t *ppos)
|
||||
{
|
||||
SENTRY;
|
||||
|
||||
|
@ -325,7 +215,9 @@ SPL_PROC_HANDLER(proc_force_bug)
|
|||
SRETURN(0);
|
||||
}
|
||||
|
||||
SPL_PROC_HANDLER(proc_console_max_delay_cs)
|
||||
static int
|
||||
proc_console_max_delay_cs(struct ctl_table *table, int write,
|
||||
void __user *buffer, size_t *lenp, loff_t *ppos)
|
||||
{
|
||||
int rc, max_delay_cs;
|
||||
spl_ctl_table dummy = *table;
|
||||
|
@ -337,7 +229,7 @@ SPL_PROC_HANDLER(proc_console_max_delay_cs)
|
|||
|
||||
if (write) {
|
||||
max_delay_cs = 0;
|
||||
rc = spl_proc_dointvec(&dummy,write,filp,buffer,lenp,ppos);
|
||||
rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
|
||||
if (rc < 0)
|
||||
SRETURN(rc);
|
||||
|
||||
|
@ -351,13 +243,15 @@ SPL_PROC_HANDLER(proc_console_max_delay_cs)
|
|||
spl_console_max_delay = d;
|
||||
} else {
|
||||
max_delay_cs = (spl_console_max_delay * 100) / HZ;
|
||||
rc = spl_proc_dointvec(&dummy,write,filp,buffer,lenp,ppos);
|
||||
rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
|
||||
}
|
||||
|
||||
SRETURN(rc);
|
||||
}
|
||||
|
||||
SPL_PROC_HANDLER(proc_console_min_delay_cs)
|
||||
static int
|
||||
proc_console_min_delay_cs(struct ctl_table *table, int write,
|
||||
void __user *buffer, size_t *lenp, loff_t *ppos)
|
||||
{
|
||||
int rc, min_delay_cs;
|
||||
spl_ctl_table dummy = *table;
|
||||
|
@ -369,7 +263,7 @@ SPL_PROC_HANDLER(proc_console_min_delay_cs)
|
|||
|
||||
if (write) {
|
||||
min_delay_cs = 0;
|
||||
rc = spl_proc_dointvec(&dummy,write,filp,buffer,lenp,ppos);
|
||||
rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
|
||||
if (rc < 0)
|
||||
SRETURN(rc);
|
||||
|
||||
|
@ -383,13 +277,15 @@ SPL_PROC_HANDLER(proc_console_min_delay_cs)
|
|||
spl_console_min_delay = d;
|
||||
} else {
|
||||
min_delay_cs = (spl_console_min_delay * 100) / HZ;
|
||||
rc = spl_proc_dointvec(&dummy,write,filp,buffer,lenp,ppos);
|
||||
rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
|
||||
}
|
||||
|
||||
SRETURN(rc);
|
||||
}
|
||||
|
||||
SPL_PROC_HANDLER(proc_console_backoff)
|
||||
static int
|
||||
proc_console_backoff(struct ctl_table *table, int write,
|
||||
void __user *buffer, size_t *lenp, loff_t *ppos)
|
||||
{
|
||||
int rc, backoff;
|
||||
spl_ctl_table dummy = *table;
|
||||
|
@ -400,7 +296,7 @@ SPL_PROC_HANDLER(proc_console_backoff)
|
|||
|
||||
if (write) {
|
||||
backoff = 0;
|
||||
rc = spl_proc_dointvec(&dummy,write,filp,buffer,lenp,ppos);
|
||||
rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
|
||||
if (rc < 0)
|
||||
SRETURN(rc);
|
||||
|
||||
|
@ -410,7 +306,7 @@ SPL_PROC_HANDLER(proc_console_backoff)
|
|||
spl_console_backoff = backoff;
|
||||
} else {
|
||||
backoff = spl_console_backoff;
|
||||
rc = spl_proc_dointvec(&dummy,write,filp,buffer,lenp,ppos);
|
||||
rc = proc_dointvec(&dummy, write, buffer, lenp, ppos);
|
||||
}
|
||||
|
||||
SRETURN(rc);
|
||||
|
@ -418,7 +314,9 @@ SPL_PROC_HANDLER(proc_console_backoff)
|
|||
#endif /* DEBUG_LOG */
|
||||
|
||||
#ifdef DEBUG_KMEM
|
||||
SPL_PROC_HANDLER(proc_domemused)
|
||||
static int
|
||||
proc_domemused(struct ctl_table *table, int write,
|
||||
void __user *buffer, size_t *lenp, loff_t *ppos)
|
||||
{
|
||||
int rc = 0;
|
||||
unsigned long min = 0, max = ~0, val;
|
||||
|
@ -438,14 +336,15 @@ SPL_PROC_HANDLER(proc_domemused)
|
|||
# else
|
||||
val = atomic_read((atomic_t *)table->data);
|
||||
# endif /* HAVE_ATOMIC64_T */
|
||||
rc = spl_proc_doulongvec_minmax(&dummy, write, filp,
|
||||
buffer, lenp, ppos);
|
||||
rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
|
||||
}
|
||||
|
||||
SRETURN(rc);
|
||||
}
|
||||
|
||||
SPL_PROC_HANDLER(proc_doslab)
|
||||
static int
|
||||
proc_doslab(struct ctl_table *table, int write,
|
||||
void __user *buffer, size_t *lenp, loff_t *ppos)
|
||||
{
|
||||
int rc = 0;
|
||||
unsigned long min = 0, max = ~0, val = 0, mask;
|
||||
|
@ -485,22 +384,23 @@ SPL_PROC_HANDLER(proc_doslab)
|
|||
}
|
||||
|
||||
up_read(&spl_kmem_cache_sem);
|
||||
rc = spl_proc_doulongvec_minmax(&dummy, write, filp,
|
||||
buffer, lenp, ppos);
|
||||
rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
|
||||
}
|
||||
|
||||
SRETURN(rc);
|
||||
}
|
||||
#endif /* DEBUG_KMEM */

SPL_PROC_HANDLER(proc_dohostid)
static int
proc_dohostid(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int len, rc = 0;
	char *end, str[32];
	SENTRY;

	if (write) {
		/* We can't use spl_proc_doulongvec_minmax() in the write
		/* We can't use proc_doulongvec_minmax() in the write
		 * case here because hostid while a hex value has no
		 * leading 0x which confuses the helper function. */
		rc = proc_copyin_string(str, sizeof(str), buffer, *lenp);

@@ -527,100 +427,6 @@ SPL_PROC_HANDLER(proc_dohostid)
	SRETURN(rc);
}
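For example, writing the string "007f0100" to /proc/sys/kernel/spl/hostid is parsed here as base-16 even without a leading 0x, yielding 0x007f0100 (8323328); the stock proc_doulongvec_minmax() helper cannot parse that string correctly, which is why the write path copies the string in and converts it by hand.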
|
||||
|
||||
#ifndef HAVE_KALLSYMS_LOOKUP_NAME
|
||||
SPL_PROC_HANDLER(proc_dokallsyms_lookup_name)
|
||||
{
|
||||
int len, rc = 0;
|
||||
char *end, str[32];
|
||||
SENTRY;
|
||||
|
||||
if (write) {
|
||||
/* This may only be set once at module load time */
|
||||
if (spl_kallsyms_lookup_name_fn != SYMBOL_POISON)
|
||||
SRETURN(-EEXIST);
|
||||
|
||||
/* We can't use spl_proc_doulongvec_minmax() in the write
|
||||
* case here because the address while a hex value has no
|
||||
* leading 0x which confuses the helper function. */
|
||||
rc = proc_copyin_string(str, sizeof(str), buffer, *lenp);
|
||||
if (rc < 0)
|
||||
SRETURN(rc);
|
||||
|
||||
spl_kallsyms_lookup_name_fn =
|
||||
(kallsyms_lookup_name_t)simple_strtoul(str, &end, 16);
|
||||
wake_up(&spl_kallsyms_lookup_name_waitq);
|
||||
|
||||
if (str == end)
|
||||
SRETURN(-EINVAL);
|
||||
|
||||
*ppos += *lenp;
|
||||
} else {
|
||||
len = snprintf(str, sizeof(str), "%lx",
|
||||
(unsigned long)spl_kallsyms_lookup_name_fn);
|
||||
if (*ppos >= len)
|
||||
rc = 0;
|
||||
else
|
||||
rc = proc_copyout_string(buffer,*lenp,str+*ppos,"\n");
|
||||
|
||||
if (rc >= 0) {
|
||||
*lenp = rc;
|
||||
*ppos += rc;
|
||||
}
|
||||
}
|
||||
|
||||
SRETURN(rc);
|
||||
}
|
||||
#endif /* HAVE_KALLSYMS_LOOKUP_NAME */
|
||||
|
||||
SPL_PROC_HANDLER(proc_doavailrmem)
|
||||
{
|
||||
int len, rc = 0;
|
||||
char str[32];
|
||||
SENTRY;
|
||||
|
||||
if (write) {
|
||||
*ppos += *lenp;
|
||||
} else {
|
||||
len = snprintf(str, sizeof(str), "%lu",
|
||||
(unsigned long)availrmem);
|
||||
if (*ppos >= len)
|
||||
rc = 0;
|
||||
else
|
||||
rc = proc_copyout_string(buffer,*lenp,str+*ppos,"\n");
|
||||
|
||||
if (rc >= 0) {
|
||||
*lenp = rc;
|
||||
*ppos += rc;
|
||||
}
|
||||
}
|
||||
|
||||
SRETURN(rc);
|
||||
}
|
||||
|
||||
SPL_PROC_HANDLER(proc_dofreemem)
|
||||
{
|
||||
int len, rc = 0;
|
||||
char str[32];
|
||||
SENTRY;
|
||||
|
||||
if (write) {
|
||||
*ppos += *lenp;
|
||||
} else {
|
||||
len = snprintf(str, sizeof(str), "%lu", (unsigned long)freemem);
|
||||
if (*ppos >= len)
|
||||
rc = 0;
|
||||
else
|
||||
rc = proc_copyout_string(buffer,*lenp,str+*ppos,"\n");
|
||||
|
||||
if (rc >= 0) {
|
||||
*lenp = rc;
|
||||
*ppos += rc;
|
||||
}
|
||||
}
|
||||
|
||||
SRETURN(rc);
|
||||
}
|
||||
|
||||
#ifdef DEBUG_KMEM
|
||||
static void
|
||||
slab_seq_show_headers(struct seq_file *f)
|
||||
|
@ -738,7 +544,6 @@ static struct file_operations proc_slab_operations = {
|
|||
#ifdef DEBUG_LOG
|
||||
static struct ctl_table spl_debug_table[] = {
|
||||
{
|
||||
CTL_NAME (CTL_DEBUG_SUBSYS)
|
||||
.procname = "subsystem",
|
||||
.data = &spl_debug_subsys,
|
||||
.maxlen = sizeof(unsigned long),
|
||||
|
@ -746,7 +551,6 @@ static struct ctl_table spl_debug_table[] = {
|
|||
.proc_handler = &proc_dobitmasks
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_DEBUG_MASK)
|
||||
.procname = "mask",
|
||||
.data = &spl_debug_mask,
|
||||
.maxlen = sizeof(unsigned long),
|
||||
|
@ -754,7 +558,6 @@ static struct ctl_table spl_debug_table[] = {
|
|||
.proc_handler = &proc_dobitmasks
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_DEBUG_PRINTK)
|
||||
.procname = "printk",
|
||||
.data = &spl_debug_printk,
|
||||
.maxlen = sizeof(unsigned long),
|
||||
|
@ -762,13 +565,11 @@ static struct ctl_table spl_debug_table[] = {
|
|||
.proc_handler = &proc_dobitmasks
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_DEBUG_MB)
|
||||
.procname = "mb",
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_debug_mb,
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_DEBUG_BINARY)
|
||||
.procname = "binary",
|
||||
.data = &spl_debug_binary,
|
||||
.maxlen = sizeof(int),
|
||||
|
@ -776,7 +577,6 @@ static struct ctl_table spl_debug_table[] = {
|
|||
.proc_handler = &proc_dointvec,
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_DEBUG_CATASTROPHE)
|
||||
.procname = "catastrophe",
|
||||
.data = &spl_debug_catastrophe,
|
||||
.maxlen = sizeof(int),
|
||||
|
@ -784,7 +584,6 @@ static struct ctl_table spl_debug_table[] = {
|
|||
.proc_handler = &proc_dointvec,
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_DEBUG_PANIC_ON_BUG)
|
||||
.procname = "panic_on_bug",
|
||||
.data = &spl_debug_panic_on_bug,
|
||||
.maxlen = sizeof(int),
|
||||
|
@ -792,7 +591,6 @@ static struct ctl_table spl_debug_table[] = {
|
|||
.proc_handler = &proc_dointvec
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_DEBUG_PATH)
|
||||
.procname = "path",
|
||||
.data = spl_debug_file_path,
|
||||
.maxlen = sizeof(spl_debug_file_path),
|
||||
|
@ -800,18 +598,16 @@ static struct ctl_table spl_debug_table[] = {
|
|||
.proc_handler = &proc_dostring,
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_DEBUG_DUMP)
|
||||
.procname = "dump",
|
||||
.mode = 0200,
|
||||
.proc_handler = &proc_dump_kernel,
|
||||
},
|
||||
{ CTL_NAME (CTL_DEBUG_FORCE_BUG)
|
||||
{
|
||||
.procname = "force_bug",
|
||||
.mode = 0200,
|
||||
.proc_handler = &proc_force_bug,
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_CONSOLE_RATELIMIT)
|
||||
.procname = "console_ratelimit",
|
||||
.data = &spl_console_ratelimit,
|
||||
.maxlen = sizeof(int),
|
||||
|
@ -819,28 +615,24 @@ static struct ctl_table spl_debug_table[] = {
|
|||
.proc_handler = &proc_dointvec,
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_CONSOLE_MAX_DELAY_CS)
|
||||
.procname = "console_max_delay_centisecs",
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_console_max_delay_cs,
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_CONSOLE_MIN_DELAY_CS)
|
||||
.procname = "console_min_delay_centisecs",
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_console_min_delay_cs,
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_CONSOLE_BACKOFF)
|
||||
.procname = "console_backoff",
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_console_backoff,
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_DEBUG_STACK_SIZE)
|
||||
.procname = "stack_max",
|
||||
.data = &spl_debug_stack,
|
||||
.maxlen = sizeof(int),
|
||||
|
@ -851,84 +643,9 @@ static struct ctl_table spl_debug_table[] = {
|
|||
};
|
||||
#endif /* DEBUG_LOG */
|
||||
|
||||
static struct ctl_table spl_vm_table[] = {
|
||||
{
|
||||
CTL_NAME (CTL_VM_MINFREE)
|
||||
.procname = "minfree",
|
||||
.data = &minfree,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec,
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_VM_DESFREE)
|
||||
.procname = "desfree",
|
||||
.data = &desfree,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec,
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_VM_LOTSFREE)
|
||||
.procname = "lotsfree",
|
||||
.data = &lotsfree,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec,
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_VM_NEEDFREE)
|
||||
.procname = "needfree",
|
||||
.data = &needfree,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0444,
|
||||
.proc_handler = &proc_dointvec,
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_VM_SWAPFS_MINFREE)
|
||||
.procname = "swapfs_minfree",
|
||||
.data = &swapfs_minfree,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec,
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_VM_SWAPFS_RESERVE)
|
||||
.procname = "swapfs_reserve",
|
||||
.data = &swapfs_reserve,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dointvec,
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_VM_AVAILRMEM)
|
||||
.procname = "availrmem",
|
||||
.mode = 0444,
|
||||
.proc_handler = &proc_doavailrmem,
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_VM_FREEMEM)
|
||||
.procname = "freemem",
|
||||
.data = (void *)2,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0444,
|
||||
.proc_handler = &proc_dofreemem,
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_VM_PHYSMEM)
|
||||
.procname = "physmem",
|
||||
.data = &physmem,
|
||||
.maxlen = sizeof(int),
|
||||
.mode = 0444,
|
||||
.proc_handler = &proc_dointvec,
|
||||
},
|
||||
{0},
|
||||
};
|
||||
|
||||
#ifdef DEBUG_KMEM
|
||||
static struct ctl_table spl_kmem_table[] = {
|
||||
{
|
||||
CTL_NAME (CTL_KMEM_KMEMUSED)
|
||||
.procname = "kmem_used",
|
||||
.data = &kmem_alloc_used,
|
||||
# ifdef HAVE_ATOMIC64_T
|
||||
|
@ -940,7 +657,6 @@ static struct ctl_table spl_kmem_table[] = {
|
|||
.proc_handler = &proc_domemused,
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_KMEM_KMEMMAX)
|
||||
.procname = "kmem_max",
|
||||
.data = &kmem_alloc_max,
|
||||
.maxlen = sizeof(unsigned long),
|
||||
|
@ -950,7 +666,6 @@ static struct ctl_table spl_kmem_table[] = {
|
|||
.proc_handler = &proc_doulongvec_minmax,
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_KMEM_VMEMUSED)
|
||||
.procname = "vmem_used",
|
||||
.data = &vmem_alloc_used,
|
||||
# ifdef HAVE_ATOMIC64_T
|
||||
|
@ -962,7 +677,6 @@ static struct ctl_table spl_kmem_table[] = {
|
|||
.proc_handler = &proc_domemused,
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_KMEM_VMEMMAX)
|
||||
.procname = "vmem_max",
|
||||
.data = &vmem_alloc_max,
|
||||
.maxlen = sizeof(unsigned long),
|
||||
|
@ -972,7 +686,6 @@ static struct ctl_table spl_kmem_table[] = {
|
|||
.proc_handler = &proc_doulongvec_minmax,
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_KMEM_SLAB_KMEMTOTAL)
|
||||
.procname = "slab_kmem_total",
|
||||
.data = (void *)(KMC_KMEM | KMC_TOTAL),
|
||||
.maxlen = sizeof(unsigned long),
|
||||
|
@ -982,7 +695,6 @@ static struct ctl_table spl_kmem_table[] = {
|
|||
.proc_handler = &proc_doslab,
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_KMEM_SLAB_KMEMALLOC)
|
||||
.procname = "slab_kmem_alloc",
|
||||
.data = (void *)(KMC_KMEM | KMC_ALLOC),
|
||||
.maxlen = sizeof(unsigned long),
|
||||
|
@ -992,7 +704,6 @@ static struct ctl_table spl_kmem_table[] = {
|
|||
.proc_handler = &proc_doslab,
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_KMEM_SLAB_KMEMMAX)
|
||||
.procname = "slab_kmem_max",
|
||||
.data = (void *)(KMC_KMEM | KMC_MAX),
|
||||
.maxlen = sizeof(unsigned long),
|
||||
|
@ -1002,7 +713,6 @@ static struct ctl_table spl_kmem_table[] = {
|
|||
.proc_handler = &proc_doslab,
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_KMEM_SLAB_VMEMTOTAL)
|
||||
.procname = "slab_vmem_total",
|
||||
.data = (void *)(KMC_VMEM | KMC_TOTAL),
|
||||
.maxlen = sizeof(unsigned long),
|
||||
|
@ -1012,7 +722,6 @@ static struct ctl_table spl_kmem_table[] = {
|
|||
.proc_handler = &proc_doslab,
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_KMEM_SLAB_VMEMALLOC)
|
||||
.procname = "slab_vmem_alloc",
|
||||
.data = (void *)(KMC_VMEM | KMC_ALLOC),
|
||||
.maxlen = sizeof(unsigned long),
|
||||
|
@ -1022,7 +731,6 @@ static struct ctl_table spl_kmem_table[] = {
|
|||
.proc_handler = &proc_doslab,
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_KMEM_SLAB_VMEMMAX)
|
||||
.procname = "slab_vmem_max",
|
||||
.data = (void *)(KMC_VMEM | KMC_MAX),
|
||||
.maxlen = sizeof(unsigned long),
|
||||
|
@ -1044,7 +752,6 @@ static struct ctl_table spl_table[] = {
|
|||
* sysctl(8) prefers to go via /proc for portability.
|
||||
*/
|
||||
{
|
||||
CTL_NAME (CTL_VERSION)
|
||||
.procname = "version",
|
||||
.data = spl_version,
|
||||
.maxlen = sizeof(spl_version),
|
||||
|
@ -1052,47 +759,27 @@ static struct ctl_table spl_table[] = {
|
|||
.proc_handler = &proc_dostring,
|
||||
},
|
||||
{
|
||||
CTL_NAME (CTL_HOSTID)
|
||||
.procname = "hostid",
|
||||
.data = &spl_hostid,
|
||||
.maxlen = sizeof(unsigned long),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dohostid,
|
||||
},
|
||||
#ifndef HAVE_KALLSYMS_LOOKUP_NAME
|
||||
{
|
||||
CTL_NAME (CTL_KALLSYMS)
|
||||
.procname = "kallsyms_lookup_name",
|
||||
.data = &spl_kallsyms_lookup_name_fn,
|
||||
.maxlen = sizeof(unsigned long),
|
||||
.mode = 0644,
|
||||
.proc_handler = &proc_dokallsyms_lookup_name,
|
||||
},
|
||||
#endif
|
||||
#ifdef DEBUG_LOG
|
||||
{
|
||||
CTL_NAME (CTL_SPL_DEBUG)
|
||||
.procname = "debug",
|
||||
.mode = 0555,
|
||||
.child = spl_debug_table,
|
||||
},
|
||||
#endif
|
||||
{
|
||||
CTL_NAME (CTL_SPL_VM)
|
||||
.procname = "vm",
|
||||
.mode = 0555,
|
||||
.child = spl_vm_table,
|
||||
},
|
||||
#ifdef DEBUG_KMEM
|
||||
{
|
||||
CTL_NAME (CTL_SPL_KMEM)
|
||||
.procname = "kmem",
|
||||
.mode = 0555,
|
||||
.child = spl_kmem_table,
|
||||
},
|
||||
#endif
|
||||
{
|
||||
CTL_NAME (CTL_SPL_KSTAT)
|
||||
.procname = "kstat",
|
||||
.mode = 0555,
|
||||
.child = spl_kstat_table,
|
||||
|
@ -1102,7 +789,6 @@ static struct ctl_table spl_table[] = {
|
|||
|
||||
static struct ctl_table spl_dir[] = {
|
||||
{
|
||||
CTL_NAME (CTL_SPL)
|
||||
.procname = "spl",
|
||||
.mode = 0555,
|
||||
.child = spl_table,
|
||||
|
@ -1112,7 +798,9 @@ static struct ctl_table spl_dir[] = {
|
|||
|
||||
static struct ctl_table spl_root[] = {
|
||||
{
|
||||
CTL_NAME (CTL_KERN)
|
||||
#ifdef HAVE_CTL_NAME
|
||||
.ctl_name = CTL_KERN,
|
||||
#endif
|
||||
.procname = "kernel",
|
||||
.mode = 0555,
|
||||
.child = spl_dir,
|
||||
|
@ -1126,11 +814,9 @@ spl_proc_init(void)
|
|||
int rc = 0;
|
||||
SENTRY;
|
||||
|
||||
#ifdef CONFIG_SYSCTL
|
||||
spl_header = spl_register_sysctl_table(spl_root, 0);
|
||||
spl_header = register_sysctl_table(spl_root);
|
||||
if (spl_header == NULL)
|
||||
SRETURN(-EUNATCH);
|
||||
#endif /* CONFIG_SYSCTL */
|
||||
|
||||
proc_spl = proc_mkdir("spl", NULL);
|
||||
if (proc_spl == NULL)
|
||||
|
@ -1159,9 +845,7 @@ out:
|
|||
remove_proc_entry("kmem", proc_spl);
|
||||
#endif
|
||||
remove_proc_entry("spl", NULL);
|
||||
#ifdef CONFIG_SYSCTL
|
||||
spl_unregister_sysctl_table(spl_header);
|
||||
#endif /* CONFIG_SYSCTL */
|
||||
unregister_sysctl_table(spl_header);
|
||||
}
|
||||
|
||||
SRETURN(rc);
|
||||
|
@ -1179,10 +863,8 @@ spl_proc_fini(void)
|
|||
#endif
|
||||
remove_proc_entry("spl", NULL);
|
||||
|
||||
#ifdef CONFIG_SYSCTL
|
||||
ASSERT(spl_header != NULL);
|
||||
spl_unregister_sysctl_table(spl_header);
|
||||
#endif /* CONFIG_SYSCTL */
|
||||
unregister_sysctl_table(spl_header);
|
||||
|
||||
SEXIT;
|
||||
}
|
||||
|
|
|
@@ -1,94 +0,0 @@
/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Time Implementation.
\*****************************************************************************/

#include <sys/sysmacros.h>
#include <sys/time.h>

#ifdef HAVE_MONOTONIC_CLOCK
extern unsigned long long monotonic_clock(void);
#endif

#ifdef DEBUG_SUBSYSTEM
#undef DEBUG_SUBSYSTEM
#endif

#define DEBUG_SUBSYSTEM S_TIME

void
__gethrestime(timestruc_t *ts)
{
	struct timespec tspec;

	getnstimeofday(&tspec);

	ts->tv_sec = tspec.tv_sec;
	ts->tv_nsec = tspec.tv_nsec;
}
EXPORT_SYMBOL(__gethrestime);

/* Use monotonic_clock() by default. It's faster and is available on older
 * kernels, but few architectures have them, so we must fallback to
 * do_posix_clock_monotonic_gettime().
 */
hrtime_t
__gethrtime(void) {
#ifdef HAVE_MONOTONIC_CLOCK
	unsigned long long res = monotonic_clock();

	/* Deal with signed/unsigned mismatch */
	return (hrtime_t)(res & ~(1ULL << 63));
#else
	struct timespec ts;

	do_posix_clock_monotonic_gettime(&ts);
	return (((hrtime_t)ts.tv_sec * NSEC_PER_SEC) + ts.tv_nsec);
#endif
}
EXPORT_SYMBOL(__gethrtime);

/* set_normalized_timespec() API changes
 * 2.6.0  - 2.6.15: Inline function provided by linux/time.h
 * 2.6.16 - 2.6.25: Function prototype defined but not exported
 * 2.6.26 - 2.6.x : Function defined and exported
 */
#if !defined(HAVE_SET_NORMALIZED_TIMESPEC_INLINE) && \
    !defined(HAVE_SET_NORMALIZED_TIMESPEC_EXPORT)
void
set_normalized_timespec(struct timespec *ts, time_t sec, long nsec)
{
	while (nsec >= NSEC_PER_SEC) {
		nsec -= NSEC_PER_SEC;
		++sec;
	}
	while (nsec < 0) {
		nsec += NSEC_PER_SEC;
		--sec;
	}
	ts->tv_sec = sec;
	ts->tv_nsec = nsec;
}
EXPORT_SYMBOL(set_normalized_timespec);
#endif
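A quick worked example of the normalization above, illustrative only: passing sec = 1, nsec = 1,500,000,000 folds one full second out of the nanosecond field, storing tv_sec = 2, tv_nsec = 500,000,000; likewise sec = 1, nsec = -100,000,000 becomes tv_sec = 0, tv_nsec = 900,000,000.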
|
|
@@ -296,22 +296,6 @@ vn_seek(vnode_t *vp, offset_t ooff, offset_t *noffp, void *ct)
}
EXPORT_SYMBOL(vn_seek);

/*
 * kern_path() was introduced in Linux 2.6.28. We duplicate it as a
 * compatibility shim for earlier kernels.
 */
#ifndef HAVE_KERN_PATH
int
kern_path(const char *name, unsigned int flags, struct path *path)
{
	struct nameidata nd;
	int rc = path_lookup(name, flags, &nd);
	if (!rc)
		*path = nd.path;
	return rc;
}
#endif /* HAVE_KERN_PATH */
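A minimal usage sketch for the shim (illustrative; the path is borrowed from the hostid handling elsewhere in SPL and LOOKUP_FOLLOW is the standard namei flag):

	struct path path;
	int rc;

	rc = kern_path("/etc/hostid", LOOKUP_FOLLOW, &path);
	if (rc == 0) {
		/* ... use path.dentry and path.mnt ... */
		path_put(&path);	/* drop the reference taken by the lookup */
	}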

/*
 * spl_basename() takes a NULL-terminated string s as input containing a path.
 * It returns a char pointer to a string and a length that describe the
|
||||
|
@ -381,7 +365,7 @@ spl_kern_path_locked(const char *name, struct path *path)
|
|||
if (rc)
|
||||
return (ERR_PTR(rc));
|
||||
|
||||
spl_inode_lock_nested(parent.dentry->d_inode, I_MUTEX_PARENT);
|
||||
spl_inode_lock(parent.dentry->d_inode);
|
||||
|
||||
dentry = lookup_one_len(basename, parent.dentry, len);
|
||||
if (IS_ERR(dentry)) {
|
||||
|
@ -766,43 +750,37 @@ vn_releasef(int fd)
|
|||
} /* releasef() */
|
||||
EXPORT_SYMBOL(releasef);
|
||||
|
||||
#ifndef HAVE_SET_FS_PWD
|
||||
void
|
||||
# ifdef HAVE_SET_FS_PWD_WITH_CONST
|
||||
set_fs_pwd(struct fs_struct *fs, const struct path *path)
|
||||
# else
|
||||
set_fs_pwd(struct fs_struct *fs, struct path *path)
|
||||
# endif
|
||||
static void
|
||||
#ifdef HAVE_SET_FS_PWD_WITH_CONST
|
||||
vn_set_fs_pwd(struct fs_struct *fs, const struct path *path)
|
||||
#else
|
||||
vn_set_fs_pwd(struct fs_struct *fs, struct path *path)
|
||||
#endif /* HAVE_SET_FS_PWD_WITH_CONST */
|
||||
{
|
||||
struct path old_pwd;
|
||||
|
||||
# ifdef HAVE_FS_STRUCT_SPINLOCK
|
||||
#ifdef HAVE_FS_STRUCT_SPINLOCK
|
||||
spin_lock(&fs->lock);
|
||||
old_pwd = fs->pwd;
|
||||
fs->pwd = *path;
|
||||
path_get(path);
|
||||
spin_unlock(&fs->lock);
|
||||
# else
|
||||
#else
|
||||
write_lock(&fs->lock);
|
||||
old_pwd = fs->pwd;
|
||||
fs->pwd = *path;
|
||||
path_get(path);
|
||||
write_unlock(&fs->lock);
|
||||
# endif /* HAVE_FS_STRUCT_SPINLOCK */
|
||||
#endif /* HAVE_FS_STRUCT_SPINLOCK */
|
||||
|
||||
if (old_pwd.dentry)
|
||||
path_put(&old_pwd);
|
||||
}
|
||||
#endif /* HAVE_SET_FS_PWD */
|
||||
|
||||
int
|
||||
vn_set_pwd(const char *filename)
|
||||
{
|
||||
#ifdef HAVE_USER_PATH_DIR
|
||||
struct path path;
|
||||
#else
|
||||
struct nameidata nd;
|
||||
#endif /* HAVE_USER_PATH_DIR */
|
||||
mm_segment_t saved_fs;
|
||||
int rc;
|
||||
SENTRY;
|
||||
|
@ -815,7 +793,6 @@ vn_set_pwd(const char *filename)
|
|||
saved_fs = get_fs();
|
||||
set_fs(get_ds());
|
||||
|
||||
# ifdef HAVE_USER_PATH_DIR
|
||||
rc = user_path_dir(filename, &path);
|
||||
if (rc)
|
||||
SGOTO(out, rc);
|
||||
|
@ -824,25 +801,10 @@ vn_set_pwd(const char *filename)
|
|||
if (rc)
|
||||
SGOTO(dput_and_out, rc);
|
||||
|
||||
set_fs_pwd(current->fs, &path);
|
||||
vn_set_fs_pwd(current->fs, &path);
|
||||
|
||||
dput_and_out:
|
||||
path_put(&path);
|
||||
# else
|
||||
rc = __user_walk(filename,
|
||||
LOOKUP_FOLLOW|LOOKUP_DIRECTORY|LOOKUP_CHDIR, &nd);
|
||||
if (rc)
|
||||
SGOTO(out, rc);
|
||||
|
||||
rc = vfs_permission(&nd, MAY_EXEC);
|
||||
if (rc)
|
||||
SGOTO(dput_and_out, rc);
|
||||
|
||||
set_fs_pwd(current->fs, &nd.path);
|
||||
|
||||
dput_and_out:
|
||||
path_put(&nd.path);
|
||||
# endif /* HAVE_USER_PATH_DIR */
|
||||
out:
|
||||
set_fs(saved_fs);
|
||||
|
||||
|
|
|
@ -26,6 +26,7 @@
|
|||
|
||||
#include <sys/atomic.h>
|
||||
#include <sys/thread.h>
|
||||
#include <sys/mutex.h>
|
||||
#include <linux/slab.h>
|
||||
#include "splat-internal.h"
|
||||
|
||||
|
@ -52,7 +53,7 @@ typedef enum {
|
|||
typedef struct atomic_priv {
|
||||
unsigned long ap_magic;
|
||||
struct file *ap_file;
|
||||
struct mutex ap_lock;
|
||||
kmutex_t ap_lock;
|
||||
wait_queue_head_t ap_waitq;
|
||||
volatile uint64_t ap_atomic;
|
||||
volatile uint64_t ap_atomic_exited;
|
||||
|
@ -70,10 +71,10 @@ splat_atomic_work(void *priv)
|
|||
ap = (atomic_priv_t *)priv;
|
||||
ASSERT(ap->ap_magic == SPLAT_ATOMIC_TEST_MAGIC);
|
||||
|
||||
mutex_lock(&ap->ap_lock);
|
||||
mutex_enter(&ap->ap_lock);
|
||||
op = ap->ap_op;
|
||||
wake_up(&ap->ap_waitq);
|
||||
mutex_unlock(&ap->ap_lock);
|
||||
mutex_exit(&ap->ap_lock);
|
||||
|
||||
splat_vprint(ap->ap_file, SPLAT_ATOMIC_TEST1_NAME,
|
||||
"Thread %d successfully started: %lu/%lu\n", op,
|
||||
|
@ -143,13 +144,13 @@ splat_atomic_test1(struct file *file, void *arg)
|
|||
|
||||
ap.ap_magic = SPLAT_ATOMIC_TEST_MAGIC;
|
||||
ap.ap_file = file;
|
||||
mutex_init(&ap.ap_lock);
|
||||
mutex_init(&ap.ap_lock, SPLAT_ATOMIC_TEST1_NAME, NULL, NULL);
|
||||
init_waitqueue_head(&ap.ap_waitq);
|
||||
ap.ap_atomic = SPLAT_ATOMIC_INIT_VALUE;
|
||||
ap.ap_atomic_exited = 0;
|
||||
|
||||
for (i = 0; i < SPLAT_ATOMIC_COUNT_64; i++) {
|
||||
mutex_lock(&ap.ap_lock);
|
||||
mutex_enter(&ap.ap_lock);
|
||||
ap.ap_op = i;
|
||||
|
||||
thr = (kthread_t *)thread_create(NULL, 0, splat_atomic_work,
|
||||
|
@ -157,14 +158,14 @@ splat_atomic_test1(struct file *file, void *arg)
|
|||
minclsyspri);
|
||||
if (thr == NULL) {
|
||||
rc = -ESRCH;
|
||||
mutex_unlock(&ap.ap_lock);
|
||||
mutex_exit(&ap.ap_lock);
|
||||
break;
|
||||
}
|
||||
|
||||
/* Prepare to wait, the new thread will wake us once it
|
||||
* has made a copy of the unique private passed data */
|
||||
prepare_to_wait(&ap.ap_waitq, &wait, TASK_UNINTERRUPTIBLE);
|
||||
mutex_unlock(&ap.ap_lock);
|
||||
mutex_exit(&ap.ap_lock);
|
||||
schedule();
|
||||
}
|
||||
|
||||
|
@ -187,6 +188,8 @@ splat_atomic_test1(struct file *file, void *arg)
|
|||
"Success initial and final values match, %lu == %lu\n",
|
||||
(long unsigned)ap.ap_atomic, SPLAT_ATOMIC_INIT_VALUE);
|
||||
|
||||
mutex_destroy(&ap.ap_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -49,29 +49,25 @@
|
|||
#include <linux/cdev.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/miscdevice.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/debug.h>
|
||||
#include <sys/mutex.h>
|
||||
#include "splat-internal.h"
|
||||
|
||||
static spl_class *splat_class;
|
||||
static spl_device *splat_device;
|
||||
static struct list_head splat_module_list;
|
||||
static spinlock_t splat_module_lock;
|
||||
|
||||
static int
|
||||
splat_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
unsigned int minor = iminor(inode);
|
||||
splat_info_t *info;
|
||||
|
||||
if (minor >= SPLAT_MINORS)
|
||||
return -ENXIO;
|
||||
|
||||
info = (splat_info_t *)kmalloc(sizeof(*info), GFP_KERNEL);
|
||||
if (info == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
mutex_init(&info->info_lock);
|
||||
mutex_init(&info->info_lock, SPLAT_NAME, MUTEX_DEFAULT, NULL);
|
||||
info->info_size = SPLAT_INFO_BUFFER_SIZE;
|
||||
info->info_buffer = (char *)vmalloc(SPLAT_INFO_BUFFER_SIZE);
|
||||
if (info->info_buffer == NULL) {
|
||||
|
@ -91,12 +87,8 @@ splat_open(struct inode *inode, struct file *file)
|
|||
static int
|
||||
splat_release(struct inode *inode, struct file *file)
|
||||
{
|
||||
unsigned int minor = iminor(inode);
|
||||
splat_info_t *info = (splat_info_t *)file->private_data;
|
||||
|
||||
if (minor >= SPLAT_MINORS)
|
||||
return -ENXIO;
|
||||
|
||||
ASSERT(info);
|
||||
ASSERT(info->info_buffer);
|
||||
|
||||
|
@ -115,10 +107,10 @@ splat_buffer_clear(struct file *file, splat_cfg_t *kcfg, unsigned long arg)
|
|||
ASSERT(info);
|
||||
ASSERT(info->info_buffer);
|
||||
|
||||
mutex_lock(&info->info_lock);
|
||||
mutex_enter(&info->info_lock);
|
||||
memset(info->info_buffer, 0, info->info_size);
|
||||
info->info_head = info->info_buffer;
|
||||
mutex_unlock(&info->info_lock);
|
||||
mutex_exit(&info->info_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -133,7 +125,7 @@ splat_buffer_size(struct file *file, splat_cfg_t *kcfg, unsigned long arg)
|
|||
ASSERT(info);
|
||||
ASSERT(info->info_buffer);
|
||||
|
||||
mutex_lock(&info->info_lock);
|
||||
mutex_enter(&info->info_lock);
|
||||
if (kcfg->cfg_arg1 > 0) {
|
||||
|
||||
size = kcfg->cfg_arg1;
|
||||
|
@@ -158,7 +150,7 @@ splat_buffer_size(struct file *file, splat_cfg_t *kcfg, unsigned long arg)
    if (copy_to_user((struct splat_cfg_t __user *)arg, kcfg, sizeof(*kcfg)))
        rc = -EFAULT;
out:
    mutex_unlock(&info->info_lock);
    mutex_exit(&info->info_lock);

    return rc;
}

@@ -457,16 +449,12 @@ splat_ioctl_cmd(struct file *file, unsigned int cmd, unsigned long arg)
static long
splat_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
    unsigned int minor = iminor(file->f_dentry->d_inode);
    int rc = 0;

    /* Ignore tty ioctls */
    if ((cmd & 0xffffff00) == ((int)'T') << 8)
        return -ENOTTY;

    if (minor >= SPLAT_MINORS)
        return -ENXIO;

    switch (cmd) {
    case SPLAT_CFG:
        rc = splat_ioctl_cfg(file, cmd, arg);

@@ -499,17 +487,13 @@ splat_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static ssize_t splat_write(struct file *file, const char __user *buf,
        size_t count, loff_t *ppos)
{
    unsigned int minor = iminor(file->f_dentry->d_inode);
    splat_info_t *info = (splat_info_t *)file->private_data;
    int rc = 0;

    if (minor >= SPLAT_MINORS)
        return -ENXIO;

    ASSERT(info);
    ASSERT(info->info_buffer);

    mutex_lock(&info->info_lock);
    mutex_enter(&info->info_lock);

    /* Write beyond EOF */
    if (*ppos >= info->info_size) {

@@ -529,24 +513,20 @@ static ssize_t splat_write(struct file *file, const char __user *buf,
    *ppos += count;
    rc = count;
out:
    mutex_unlock(&info->info_lock);
    mutex_exit(&info->info_lock);
    return rc;
}

static ssize_t splat_read(struct file *file, char __user *buf,
        size_t count, loff_t *ppos)
{
    unsigned int minor = iminor(file->f_dentry->d_inode);
    splat_info_t *info = (splat_info_t *)file->private_data;
    int rc = 0;

    if (minor >= SPLAT_MINORS)
        return -ENXIO;

    ASSERT(info);
    ASSERT(info->info_buffer);

    mutex_lock(&info->info_lock);
    mutex_enter(&info->info_lock);

    /* Read beyond EOF */
    if (*ppos >= info->info_size)

@@ -564,23 +544,19 @@ static ssize_t splat_read(struct file *file, char __user *buf,
    *ppos += count;
    rc = count;
out:
    mutex_unlock(&info->info_lock);
    mutex_exit(&info->info_lock);
    return rc;
}

static loff_t splat_seek(struct file *file, loff_t offset, int origin)
{
    unsigned int minor = iminor(file->f_dentry->d_inode);
    splat_info_t *info = (splat_info_t *)file->private_data;
    int rc = -EINVAL;

    if (minor >= SPLAT_MINORS)
        return -ENXIO;

    ASSERT(info);
    ASSERT(info->info_buffer);

    mutex_lock(&info->info_lock);
    mutex_enter(&info->info_lock);

    switch (origin) {
    case 0: /* SEEK_SET - No-op just do it */

@@ -599,12 +575,11 @@ static loff_t splat_seek(struct file *file, loff_t offset, int origin)
        rc = offset;
    }

    mutex_unlock(&info->info_lock);
    mutex_exit(&info->info_lock);

    return rc;
}

static struct cdev splat_cdev;
static struct file_operations splat_fops = {
    .owner = THIS_MODULE,
    .open = splat_open,

@@ -618,11 +593,16 @@ static struct file_operations splat_fops = {
    .llseek = splat_seek,
};

static struct miscdevice splat_misc = {
    .minor = MISC_DYNAMIC_MINOR,
    .name = SPLAT_NAME,
    .fops = &splat_fops,
};

static int
splat_init(void)
{
    dev_t dev;
    int rc;
    int error;

    spin_lock_init(&splat_module_lock);
    INIT_LIST_HEAD(&splat_module_list);

@@ -644,52 +624,25 @@ splat_init(void)
    SPLAT_SUBSYSTEM_INIT(zlib);
    SPLAT_SUBSYSTEM_INIT(linux);

    dev = MKDEV(SPLAT_MAJOR, 0);
    if ((rc = register_chrdev_region(dev, SPLAT_MINORS, SPLAT_NAME)))
        goto error;

    /* Support for registering a character driver */
    cdev_init(&splat_cdev, &splat_fops);
    splat_cdev.owner = THIS_MODULE;
    kobject_set_name(&splat_cdev.kobj, SPLAT_NAME);
    if ((rc = cdev_add(&splat_cdev, dev, SPLAT_MINORS))) {
        printk(KERN_ERR "SPLAT: Error adding cdev, %d\n", rc);
        kobject_put(&splat_cdev.kobj);
        unregister_chrdev_region(dev, SPLAT_MINORS);
        goto error;
    error = misc_register(&splat_misc);
    if (error) {
        printk(KERN_INFO "SPLAT: misc_register() failed %d\n", error);
    } else {
        printk(KERN_INFO "SPLAT: Loaded module v%s-%s%s\n",
            SPL_META_VERSION, SPL_META_RELEASE, SPL_DEBUG_STR);
    }

    /* Support for udev make driver info available in sysfs */
    splat_class = spl_class_create(THIS_MODULE, "splat");
    if (IS_ERR(splat_class)) {
        rc = PTR_ERR(splat_class);
        printk(KERN_ERR "SPLAT: Error creating splat class, %d\n", rc);
        cdev_del(&splat_cdev);
        unregister_chrdev_region(dev, SPLAT_MINORS);
        goto error;
    }

    splat_device = spl_device_create(splat_class, NULL,
        MKDEV(SPLAT_MAJOR, 0),
        NULL, SPLAT_NAME);

    printk(KERN_INFO "SPLAT: Loaded module v%s-%s%s\n",
        SPL_META_VERSION, SPL_META_RELEASE, SPL_DEBUG_STR);
    return 0;
error:
    printk(KERN_ERR "SPLAT: Error registering splat device, %d\n", rc);
    return rc;
    return (error);
}

static int
splat_fini(void)
{
    dev_t dev = MKDEV(SPLAT_MAJOR, 0);
    int error;

    spl_device_destroy(splat_class, splat_device, dev);
    spl_class_destroy(splat_class);
    cdev_del(&splat_cdev);
    unregister_chrdev_region(dev, SPLAT_MINORS);
    error = misc_deregister(&splat_misc);
    if (error)
        printk(KERN_INFO "SPLAT: misc_deregister() failed %d\n", error);

    SPLAT_SUBSYSTEM_FINI(linux);
    SPLAT_SUBSYSTEM_FINI(zlib);

@@ -710,15 +663,15 @@ splat_fini(void)

    ASSERT(list_empty(&splat_module_list));
    printk(KERN_INFO "SPLAT: Unloaded module v%s-%s%s\n",
        SPL_META_VERSION, SPL_META_RELEASE, SPL_DEBUG_STR);
        SPL_META_VERSION, SPL_META_RELEASE, SPL_DEBUG_STR);

    return 0;
    return (0);
}

spl_module_init(splat_init);
spl_module_exit(splat_fini);

MODULE_AUTHOR("Lawrence Livermore National Labs");
MODULE_DESCRIPTION("Solaris Porting LAyer Tests");
MODULE_LICENSE("GPL");
MODULE_AUTHOR(SPL_META_AUTHOR);
MODULE_LICENSE(SPL_META_LICENSE);
MODULE_VERSION(SPL_META_VERSION "-" SPL_META_RELEASE);
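The hunks above drop the manually managed cdev, class, and device node in favor of a single miscdevice registration. As a rough illustration only (none of the names below come from the SPLAT sources, and on kernels newer than 4.2 misc_deregister() returns void rather than an error), a minimal misc character device follows the same pattern:

/*
 * Minimal sketch of the miscdevice pattern adopted above; illustrative
 * only, not the SPLAT code.  The misc core picks a free minor, creates
 * the sysfs entry, and udev creates the /dev node automatically, which
 * is what removes the need for register_chrdev_region(), cdev_add(),
 * spl_class_create(), and spl_device_create().
 */
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>

static int
example_open(struct inode *inode, struct file *file)
{
    return 0;
}

static const struct file_operations example_fops = {
    .owner = THIS_MODULE,
    .open = example_open,
};

static struct miscdevice example_misc = {
    .minor = MISC_DYNAMIC_MINOR,    /* let the misc core choose a minor */
    .name = "example",              /* becomes /dev/example */
    .fops = &example_fops,
};

static int __init
example_init(void)
{
    /* Returns 0 or a negative errno on 2.6.32-era kernels. */
    return misc_register(&example_misc);
}

static void __exit
example_exit(void)
{
    misc_deregister(&example_misc);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");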
@@ -25,9 +25,9 @@
#ifndef _SPLAT_INTERNAL_H
#define _SPLAT_INTERNAL_H

#include "spl-device.h"
#include "spl-debug.h"
#include "splat-ctl.h"
#include <sys/mutex.h>

#define SPLAT_SUBSYSTEM_INIT(type) \
({ splat_subsystem_t *_sub_; \

@@ -121,7 +121,7 @@ typedef struct splat_subsystem {
#define SPLAT_INFO_BUFFER_REDZONE 256

typedef struct splat_info {
    struct mutex info_lock;
    kmutex_t info_lock;
    int info_size;
    char *info_buffer;
    char *info_head; /* Internal kernel use only */

@@ -136,7 +136,7 @@ typedef struct splat_info {
    ASSERT(_info_); \
    ASSERT(_info_->info_buffer); \
    \
    mutex_lock(&_info_->info_lock); \
    mutex_enter(&_info_->info_lock); \
    \
    /* Don't allow the kernel to start a write in the red zone */ \
    if ((int)(_info_->info_head - _info_->info_buffer) > \

@@ -148,7 +148,7 @@ typedef struct splat_info {
    _info_->info_head += _rc_; \
    } \
    \
    mutex_unlock(&_info_->info_lock); \
    mutex_exit(&_info_->info_lock); \
    \
    _rc_; \
})
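The header change above swaps the Linux struct mutex in splat_info_t for a Solaris-style kmutex_t, which is why the callers now use mutex_enter()/mutex_exit(). A minimal sketch of that locking idiom, assuming only the SPL <sys/mutex.h> interface (the structure and function names below are invented for illustration):

/* Hedged sketch of the Solaris-style mutex idiom; not SPLAT code. */
#include <sys/mutex.h>

typedef struct example_info {
    kmutex_t ei_lock;   /* was: struct mutex protected by mutex_lock() */
    int ei_size;
} example_info_t;

static void
example_info_init(example_info_t *ei)
{
    mutex_init(&ei->ei_lock, NULL, MUTEX_DEFAULT, NULL);
}

static void
example_info_set_size(example_info_t *ei, int size)
{
    mutex_enter(&ei->ei_lock);  /* replaces mutex_lock() */
    ei->ei_size = size;
    mutex_exit(&ei->ei_lock);   /* replaces mutex_unlock() */
}

static void
example_info_fini(example_info_t *ei)
{
    mutex_destroy(&ei->ei_lock);
}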
@@ -77,10 +77,6 @@
#define SPLAT_KMEM_TEST11_DESC "Slab memory overcommit test"
#endif

#define SPLAT_KMEM_TEST12_ID 0x010c
#define SPLAT_KMEM_TEST12_NAME "vmem_size"
#define SPLAT_KMEM_TEST12_DESC "Memory zone test"

#define SPLAT_KMEM_TEST13_ID 0x010d
#define SPLAT_KMEM_TEST13_NAME "slab_reclaim"
#define SPLAT_KMEM_TEST13_DESC "Slab direct memory reclaim test"

@@ -1056,9 +1052,8 @@ splat_kmem_test10(struct file *file, void *arg)

    for (alloc = 1; alloc <= 1024; alloc *= 2) {

        /* Skip tests which exceed available memory.  We
         * leverage availrmem here for some extra testing */
        if (size * alloc * SPLAT_KMEM_THREADS > availrmem / 2)
        /* Skip tests which exceed 1/2 of physical memory. */
        if (size * alloc * SPLAT_KMEM_THREADS > physmem / 2)
            continue;

        rc = splat_kmem_cache_thread_test(file, arg,

@@ -1104,84 +1099,6 @@ splat_kmem_test11(struct file *file, void *arg)
}
#endif

/*
 * Check vmem_size() behavior by acquiring the alloc/free/total vmem
 * space, then allocate a known buffer size from vmem space.  We can
 * then check that vmem_size() values were updated properly with in
 * a fairly small tolerence.  The tolerance is important because we
 * are not the only vmem consumer on the system.  Other unrelated
 * allocations might occur during the small test window.  The vmem
 * allocation itself may also add in a little extra private space to
 * the buffer.  Finally, verify total space always remains unchanged.
 */
static int
splat_kmem_test12(struct file *file, void *arg)
{
    size_t alloc1, free1, total1;
    size_t alloc2, free2, total2;
    int size = 8*1024*1024;
    void *ptr;

    alloc1 = vmem_size(NULL, VMEM_ALLOC);
    free1 = vmem_size(NULL, VMEM_FREE);
    total1 = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE);
    splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Vmem alloc=%lu "
        "free=%lu total=%lu\n", (unsigned long)alloc1,
        (unsigned long)free1, (unsigned long)total1);

    splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Alloc %d bytes\n", size);
    ptr = vmem_alloc(size, KM_SLEEP);
    if (!ptr) {
        splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
            "Failed to alloc %d bytes\n", size);
        return -ENOMEM;
    }

    alloc2 = vmem_size(NULL, VMEM_ALLOC);
    free2 = vmem_size(NULL, VMEM_FREE);
    total2 = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE);
    splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Vmem alloc=%lu "
        "free=%lu total=%lu\n", (unsigned long)alloc2,
        (unsigned long)free2, (unsigned long)total2);

    splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Free %d bytes\n", size);
    vmem_free(ptr, size);
    if (alloc2 < (alloc1 + size - (size / 100)) ||
        alloc2 > (alloc1 + size + (size / 100))) {
        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
            "VMEM_ALLOC size: %lu != %lu+%d (+/- 1%%)\n",
            (unsigned long)alloc2,(unsigned long)alloc1,size);
        return -ERANGE;
    }

    if (free2 < (free1 - size - (size / 100)) ||
        free2 > (free1 - size + (size / 100))) {
        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
            "VMEM_FREE size: %lu != %lu-%d (+/- 1%%)\n",
            (unsigned long)free2, (unsigned long)free1, size);
        return -ERANGE;
    }

    if (total1 != total2) {
        splat_vprint(file, SPLAT_KMEM_TEST12_NAME, "Failed "
            "VMEM_ALLOC | VMEM_FREE not constant: "
            "%lu != %lu\n", (unsigned long)total2,
            (unsigned long)total1);
        return -ERANGE;
    }

    splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
        "VMEM_ALLOC within tolerance: ~%ld%% (%ld/%d)\n",
        (long)abs(alloc1 + (long)size - alloc2) * 100 / (long)size,
        (long)abs(alloc1 + (long)size - alloc2), size);
    splat_vprint(file, SPLAT_KMEM_TEST12_NAME,
        "VMEM_FREE within tolerance: ~%ld%% (%ld/%d)\n",
        (long)abs((free1 - (long)size) - free2) * 100 / (long)size,
        (long)abs((free1 - (long)size) - free2), size);

    return 0;
}

typedef struct dummy_page {
    struct list_head dp_list;
    char dp_pad[PAGE_SIZE - sizeof(struct list_head)];

@@ -1360,8 +1277,6 @@ splat_kmem_init(void)
    SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST11_NAME, SPLAT_KMEM_TEST11_DESC,
        SPLAT_KMEM_TEST11_ID, splat_kmem_test11);
#endif
    SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST12_NAME, SPLAT_KMEM_TEST12_DESC,
        SPLAT_KMEM_TEST12_ID, splat_kmem_test12);
    SPLAT_TEST_INIT(sub, SPLAT_KMEM_TEST13_NAME, SPLAT_KMEM_TEST13_DESC,
        SPLAT_KMEM_TEST13_ID, splat_kmem_test13);

@@ -1373,7 +1288,6 @@ splat_kmem_fini(splat_subsystem_t *sub)
{
    ASSERT(sub);
    SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST13_ID);
    SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST12_ID);
#if 0
    SPLAT_TEST_FINI(sub, SPLAT_KMEM_TEST11_ID);
#endif
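The removed vmem_size test accepted a reading if it landed within one percent of the allocation size on either side of the expected value. A hypothetical helper showing just that bounds arithmetic (not part of the SPLAT sources):

/* Hypothetical helper: true when observed is within +/- 1% of the
 * allocation delta around (baseline + delta), mirroring the removed
 * VMEM_ALLOC and VMEM_FREE checks above. */
static int
within_one_percent(size_t observed, size_t baseline, size_t delta)
{
    size_t slack = delta / 100;

    return (observed >= baseline + delta - slack &&
        observed <= baseline + delta + slack);
}

/* e.g. within_one_percent(alloc2, alloc1, size) corresponds to the
 * first range check in the deleted splat_kmem_test12(). */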
@@ -30,61 +30,8 @@
#define SPLAT_LINUX_DESC "Kernel Compatibility Tests"

#define SPLAT_LINUX_TEST1_ID 0x1001
#define SPLAT_LINUX_TEST1_NAME "shrink_dcache"
#define SPLAT_LINUX_TEST1_DESC "Shrink dcache test"

#define SPLAT_LINUX_TEST2_ID 0x1002
#define SPLAT_LINUX_TEST2_NAME "shrink_icache"
#define SPLAT_LINUX_TEST2_DESC "Shrink icache test"

#define SPLAT_LINUX_TEST3_ID 0x1003
#define SPLAT_LINUX_TEST3_NAME "shrinker"
#define SPLAT_LINUX_TEST3_DESC "Shrinker test"


/*
 * Attempt to shrink the dcache memory.  This is simply a functional
 * to ensure we can correctly call the shrinker.  We don't check that
 * the cache actually decreased because we have no control over what
 * else may be running on the system.  This avoid false positives.
 */
static int
splat_linux_test1(struct file *file, void *arg)
{
    int remain_before;
    int remain_after;

    remain_before = shrink_dcache_memory(0, GFP_KERNEL);
    remain_after = shrink_dcache_memory(KMC_REAP_CHUNK, GFP_KERNEL);

    splat_vprint(file, SPLAT_LINUX_TEST1_NAME,
        "Shrink dcache memory, remain %d -> %d\n",
        remain_before, remain_after);

    return 0;
}

/*
 * Attempt to shrink the icache memory.  This is simply a functional
 * to ensure we can correctly call the shrinker.  We don't check that
 * the cache actually decreased because we have no control over what
 * else may be running on the system.  This avoid false positives.
 */
static int
splat_linux_test2(struct file *file, void *arg)
{
    int remain_before;
    int remain_after;

    remain_before = shrink_icache_memory(0, GFP_KERNEL);
    remain_after = shrink_icache_memory(KMC_REAP_CHUNK, GFP_KERNEL);

    splat_vprint(file, SPLAT_LINUX_TEST2_NAME,
        "Shrink icache memory, remain %d -> %d\n",
        remain_before, remain_after);

    return 0;
}
#define SPLAT_LINUX_TEST1_NAME "shrinker"
#define SPLAT_LINUX_TEST1_DESC "Shrinker test"

/*
 * Wait queue used to eliminate race between dropping of slab

@@ -117,17 +64,17 @@ __splat_linux_shrinker_fn(struct shrinker *shrink, struct shrink_control *sc)
    splat_linux_shrinker_size = splat_linux_shrinker_size -
        MIN(sc->nr_to_scan, splat_linux_shrinker_size);

    splat_vprint(splat_linux_shrinker_file, SPLAT_LINUX_TEST3_NAME,
    splat_vprint(splat_linux_shrinker_file, SPLAT_LINUX_TEST1_NAME,
        "Reclaimed %lu objects, size now %lu\n",
        sc->nr_to_scan, splat_linux_shrinker_size);
    } else {
    splat_vprint(splat_linux_shrinker_file, SPLAT_LINUX_TEST3_NAME,
    splat_vprint(splat_linux_shrinker_file, SPLAT_LINUX_TEST1_NAME,
        "Cache size is %lu\n", splat_linux_shrinker_size);
    }

    /* Far more calls than expected abort drop_slab as a failsafe */
    if (failsafe > 100) {
    splat_vprint(splat_linux_shrinker_file, SPLAT_LINUX_TEST3_NAME,
    splat_vprint(splat_linux_shrinker_file, SPLAT_LINUX_TEST1_NAME,
        "Far more calls than expected (%d), size now %lu\n",
        failsafe, splat_linux_shrinker_size);
        return -1;

@@ -168,7 +115,7 @@ splat_linux_drop_slab(struct file *file)

    rc = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
    if (rc)
        splat_vprint(file, SPLAT_LINUX_TEST3_NAME,
        splat_vprint(file, SPLAT_LINUX_TEST1_NAME,
            "Failed user helper '%s %s %s', rc = %d\n",
            argv[0], argv[1], argv[2], rc);

@@ -185,7 +132,7 @@ splat_linux_drop_slab(struct file *file)
 * API and this test ensures the compatibility code is correct.
 */
static int
splat_linux_test3(struct file *file, void *arg)
splat_linux_test1(struct file *file, void *arg)
{
    int rc = -EINVAL;

@@ -196,7 +143,7 @@ splat_linux_test3(struct file *file, void *arg)
 * use is detected.
 */
    if (splat_linux_shrinker_size || splat_linux_shrinker_file) {
        splat_vprint(file, SPLAT_LINUX_TEST3_NAME,
        splat_vprint(file, SPLAT_LINUX_TEST1_NAME,
            "Failed due to concurrent shrinker test, rc = %d\n", rc);
        return (rc);
    }

@@ -218,7 +165,7 @@ splat_linux_test3(struct file *file, void *arg)
 */
    rc = wait_event_timeout(shrinker_wait, !splat_linux_shrinker_size, HZ);
    if (!rc) {
        splat_vprint(file, SPLAT_LINUX_TEST3_NAME,
        splat_vprint(file, SPLAT_LINUX_TEST1_NAME,
            "Failed cache shrinking timed out, size now %lu",
            splat_linux_shrinker_size);
        rc = -ETIMEDOUT;

@@ -227,7 +174,7 @@ splat_linux_test3(struct file *file, void *arg)
    }

    if (!rc && splat_linux_shrinker_size != 0) {
        splat_vprint(file, SPLAT_LINUX_TEST3_NAME,
        splat_vprint(file, SPLAT_LINUX_TEST1_NAME,
            "Failed cache was not shrunk to 0, size now %lu",
            splat_linux_shrinker_size);
        rc = -EDOM;

@@ -260,10 +207,6 @@ splat_linux_init(void)

    SPLAT_TEST_INIT(sub, SPLAT_LINUX_TEST1_NAME, SPLAT_LINUX_TEST1_DESC,
        SPLAT_LINUX_TEST1_ID, splat_linux_test1);
    SPLAT_TEST_INIT(sub, SPLAT_LINUX_TEST2_NAME, SPLAT_LINUX_TEST2_DESC,
        SPLAT_LINUX_TEST2_ID, splat_linux_test2);
    SPLAT_TEST_INIT(sub, SPLAT_LINUX_TEST3_NAME, SPLAT_LINUX_TEST3_DESC,
        SPLAT_LINUX_TEST3_ID, splat_linux_test3);

    return sub;
}

@@ -272,8 +215,6 @@ void
splat_linux_fini(splat_subsystem_t *sub)
{
    ASSERT(sub);
    SPLAT_TEST_FINI(sub, SPLAT_LINUX_TEST3_ID);
    SPLAT_TEST_FINI(sub, SPLAT_LINUX_TEST2_ID);
    SPLAT_TEST_FINI(sub, SPLAT_LINUX_TEST1_ID);

    kfree(sub);
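With the dcache and icache tests gone, the one remaining test exercises the shrinker registration path that SPL wraps for the kernels still supported. A minimal sketch of a shrinker as it looked on roughly 3.0 through 3.11 kernels (the callback signature differs on 2.6.32, SPL hides that difference behind its own compatibility macros, and every name below is invented for illustration):

#include <linux/mm.h>

static unsigned long example_cache_count;

/* Combined count/scan callback used before the split
 * count_objects/scan_objects API arrived in 3.12. */
static int
example_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
    if (sc->nr_to_scan) {
        /* Reclaim pass: free up to sc->nr_to_scan objects. */
        example_cache_count -= min(sc->nr_to_scan, example_cache_count);
    }

    /* Either way, report how many reclaimable objects remain. */
    return (int)example_cache_count;
}

static struct shrinker example_shrinker = {
    .shrink = example_shrink,
    .seeks = DEFAULT_SEEKS,
};

/* register_shrinker(&example_shrinker) at init time,
 * unregister_shrinker(&example_shrinker) on teardown. */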
@@ -1,106 +0,0 @@
Required missing symbols for FC11 kernels (2.6.29.4-167.fc11.x86_64)

* get_vmalloc_info()
There is no clean API in the kernel for modules to check the virtual
memory state of the system.  This information is available in user
space under /proc/meminfo and the details for every virtual memory
node are available under /proc/vmallocinfo.

* groups_search()
This support is easily replicated if the symbol is not provided by the
kernel.  However exporting the symbol from the kernel is preferable.
This is required by the solaris credential API.

* task_curr()
This symbol is used by the solaris adaptive mutex implementation.  If
unavailable then all solaris mutexs behave strictly like linux style
semaphones.  If available then the mutex may spin for a short while,
rather than sleep, if the holder of the lock is currently executing.

* first_online_pgdat()
* next_online_pgdat()
* next_zone()
Required helper functions for the zone iterators for_each_zone() and
for_each_populated_zone().  These symbols were previously available
in 2.6.17 kernels, marked unused in 2.6.18 kernels, and removed as
of the 2.6.19 kernel series.  The information is available in user
space under /proc/zoneinfo.

diff --git a/fs/proc/mmu.c b/fs/proc/mmu.c
index 8ae221d..081c7b5 100644
--- a/fs/proc/mmu.c
+++ b/fs/proc/mmu.c
@@ -58,3 +58,4 @@ void get_vmalloc_info(struct vmalloc_info *vmi)
read_unlock(&vmlist_lock);
}
}
+EXPORT_SYMBOL(get_vmalloc_info);
diff --git a/kernel/groups.c b/kernel/groups.c
index 2b45b2e..24b62f8 100644
--- a/kernel/groups.c
+++ b/kernel/groups.c
@@ -153,6 +153,7 @@ int groups_search(const struct group_info *group_info, gid_t grp)
}
return 0;
}
+EXPORT_SYMBOL(groups_search);

/**
* set_groups - Change a group subscription in a set of credentials
diff --git a/kernel/sched.c b/kernel/sched.c
index 1b59e26..8728c52 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1883,10 +1883,11 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
* task_curr - is this task currently executing on a CPU?
* @p: the task in question.
*/
-inline int task_curr(const struct task_struct *p)
+task_curr(const struct task_struct *p)
{
return cpu_curr(task_cpu(p)) == p;
}
+EXPORT_SYMBOL(task_curr);

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
diff --git a/mm/mmzone.c b/mm/mmzone.c
index f5b7d17..1468a22 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -14,6 +14,7 @@ struct pglist_data *first_online_pgdat(void)
{
return NODE_DATA(first_online_node);
}
+EXPORT_SYMBOL(first_online_pgdat);

struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
{
@@ -23,6 +24,7 @@ struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
return NULL;
return NODE_DATA(nid);
}
+EXPORT_SYMBOL(next_online_pgdat);

/*
* next_zone - helper magic for for_each_zone()
@@ -42,6 +44,7 @@ struct zone *next_zone(struct zone *zone)
}
return zone;
}
+EXPORT_SYMBOL(next_zone);

static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
{
diff --git a/kernel/fork.c b/kernel/fork.c
index 9b42695..852499e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -159,6 +159,7 @@ void __put_task_struct(struct task_struct *tsk)
if (!profile_handoff_task(tsk))
free_task(tsk);
}
+EXPORT_SYMBOL(__put_task_struct);

/*
* macro override instead of weak attribute alias, to workaround
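The removed notes above explain why task_curr() mattered to the port: it lets a Solaris-style adaptive mutex spin only while the current lock holder is executing on a CPU, and sleep otherwise. A hedged sketch of that decision, using hypothetical types and names rather than SPL's actual implementation:

#include <linux/sched.h>

struct adaptive_lock {
    struct task_struct *owner;  /* task currently holding the lock */
    /* underlying sleeping lock omitted */
};

/* Spin while the owner is on a CPU; once the owner is off-CPU,
 * spinning cannot make progress, so the caller should sleep. */
static int
adaptive_should_spin(struct adaptive_lock *lp)
{
    struct task_struct *owner = lp->owner;

    return (owner != NULL && task_curr(owner));
}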
@@ -1,91 +0,0 @@
Index: linux+rh+chaos/kernel/sched.c
===================================================================
--- linux+rh+chaos.orig/kernel/sched.c
+++ linux+rh+chaos/kernel/sched.c
@@ -1034,10 +1034,11 @@ static inline void resched_task(struct t
* task_curr - is this task currently executing on a CPU?
* @p: the task in question.
*/
-inline int task_curr(const struct task_struct *p)
+int task_curr(const struct task_struct *p)
{
return cpu_curr(task_cpu(p)) == p;
}
+EXPORT_SYMBOL(task_curr); /* Request export upstream */

/* Used instead of source_load when we know the type == 0 */
unsigned long weighted_cpuload(const int cpu)
Index: linux+rh+chaos/kernel/time.c
===================================================================
--- linux+rh+chaos.orig/kernel/time.c
+++ linux+rh+chaos/kernel/time.c
@@ -605,6 +605,7 @@ void set_normalized_timespec(struct time
ts->tv_sec = sec;
ts->tv_nsec = nsec;
}
+EXPORT_SYMBOL(set_normalized_timespec); /* Exported as of 2.6.26 */

/**
* ns_to_timespec - Convert nanoseconds to timespec
Index: linux+rh+chaos/kernel/kallsyms.c
===================================================================
--- linux+rh+chaos.orig/kernel/kallsyms.c
+++ linux+rh+chaos/kernel/kallsyms.c
@@ -154,6 +154,7 @@ unsigned long kallsyms_lookup_name(const
}
return module_kallsyms_lookup_name(name);
}
+EXPORT_SYMBOL(kallsyms_lookup_name); /* Exported prior to 2.6.19 */

/*
* Lookup an address
Index: linux+rh+chaos/fs/proc/mmu.c
===================================================================
--- linux+rh+chaos.orig/fs/proc/mmu.c
+++ linux+rh+chaos/fs/proc/mmu.c
@@ -75,3 +75,4 @@ void get_vmalloc_info(struct vmalloc_inf
read_unlock(&vmlist_lock);
}
}
+EXPORT_SYMBOL(get_vmalloc_info); /* Request clean upstream API for this */
Index: linux+rh+chaos/mm/mmzone.c
===================================================================
--- linux+rh+chaos.orig/mm/mmzone.c
+++ linux+rh+chaos/mm/mmzone.c
@@ -14,7 +14,7 @@ struct pglist_data *first_online_pgdat(v
return NODE_DATA(first_online_node);
}

-EXPORT_UNUSED_SYMBOL(first_online_pgdat); /* June 2006 */
+EXPORT_SYMBOL(first_online_pgdat); /* Exported prior to 2.6.18 */

struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)
{
@@ -24,7 +24,7 @@ struct pglist_data *next_online_pgdat(st
return NULL;
return NODE_DATA(nid);
}
-EXPORT_UNUSED_SYMBOL(next_online_pgdat); /* June 2006 */
+EXPORT_SYMBOL(next_online_pgdat); /* Exported prior to 2.6.18 */


/*
@@ -45,5 +45,5 @@ struct zone *next_zone(struct zone
}
return zone;
}
-EXPORT_UNUSED_SYMBOL(next_zone); /* June 2006 */
+EXPORT_SYMBOL(next_zone); /* Exported prior to 2.6.18 */

Index: linux+rh+chaos/mm/vmstat.c
===================================================================
--- linux+rh+chaos.orig/mm/vmstat.c
+++ linux+rh+chaos/mm/vmstat.c
@@ -45,6 +45,7 @@ void get_zone_counts(unsigned long *acti
*free += n;
}
}
+EXPORT_SYMBOL(get_zone_counts); /* Request clean upstream API for this */

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};