Apply two nice improvements caught by Ricardo:

* spl-05-div64.patch: a much less intrusive fix for the undefined 64-bit
  division symbols that appear when compiling the DMU on 32-bit kernels.
* spl-06-atomic64.patch: a workaround for 32-bit kernels that do not
  provide atomic64_t.

git-svn-id: https://outreach.scidac.gov/svn/spl/trunk@162 7e1ea52c-4ff2-0310-8f11-9dd32ca42a1c
commit 550f170525
parent 749045bbfa
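Background for the div64 patch, as a hedged sketch (the function and variable names below are made up for illustration): on a 32-bit target the compiler lowers 64-by-64-bit `/` and `%` into calls to the libgcc helpers __udivdi3() and __umoddi3(); the kernel does not link against libgcc, so a module that does plain 64-bit division comes up with undefined symbols at load time. The in-kernel alternative is do_div(), which only handles a 32-bit divisor.

#include <linux/types.h>
#include <asm/div64.h>

/* Hypothetical example: on a 32-bit kernel both expressions below are
 * compiled into calls to __udivdi3() and __umoddi3(), which libgcc would
 * normally provide and which are therefore undefined at module link time. */
static uint64_t naive_avg_block(uint64_t total_bytes, uint64_t nblocks)
{
	return total_bytes / nblocks;      /* -> __udivdi3() */
}

static uint64_t naive_remainder(uint64_t total_bytes, uint64_t nblocks)
{
	return total_bytes % nblocks;      /* -> __umoddi3() */
}

/* do_div() is the stock in-kernel workaround: it divides a 64-bit value
 * by a 32-bit divisor in place and returns the remainder. */
static uint64_t do_div_avg_block(uint64_t total_bytes, uint32_t nblocks)
{
	do_div(total_bytes, nblocks);      /* total_bytes becomes the quotient */
	return total_bytes;
}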
@@ -653,6 +653,7 @@ AC_DEFUN([SPL_AC_DIV64_64], [
 	AC_MSG_CHECKING([whether div64_64() is available])
 	SPL_LINUX_TRY_COMPILE([
 		#include <asm/div64.h>
+		#include <linux/types.h>
 	],[
 		uint64_t i = div64_64(1ULL, 1ULL);
 	],[
@@ -0,0 +1,58 @@
+#ifndef _SPL_ATOMIC_COMPAT_H
+#define _SPL_ATOMIC_COMPAT_H
+
+#include <asm/atomic.h>
+
+#ifndef HAVE_ATOMIC64_T
+#include <linux/spinlock.h>
+
+typedef struct {
+	spinlock_t lock;
+	__s64 val;
+} atomic64_t;
+
+#define ATOMIC64_INIT(i) { .lock = SPIN_LOCK_UNLOCKED, .val = (i) }
+
+static inline void atomic64_add(__s64 i, atomic64_t *v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&v->lock, flags);
+	v->val += i;
+	spin_unlock_irqrestore(&v->lock, flags);
+}
+
+static inline void atomic64_sub(__s64 i, atomic64_t *v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&v->lock, flags);
+	v->val -= i;
+	spin_unlock_irqrestore(&v->lock, flags);
+}
+
+static inline __s64 atomic64_read(atomic64_t *v)
+{
+	unsigned long flags;
+	__s64 r;
+
+	spin_lock_irqsave(&v->lock, flags);
+	r = v->val;
+	spin_unlock_irqrestore(&v->lock, flags);
+
+	return r;
+}
+
+static inline void atomic64_set(atomic64_t *v, __s64 i)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&v->lock, flags);
+	v->val = i;
+	spin_unlock_irqrestore(&v->lock, flags);
+}
+
+#endif /* HAVE_ATOMIC64_T */
+
+#endif /* _SPL_ATOMIC_COMPAT_H */
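For reference, a hedged usage sketch of the compat API added above; the counter name is hypothetical. When HAVE_ATOMIC64_T is defined the header falls through to the kernel's native atomic64_t, so callers look the same either way.

#include <asm/atomic_compat.h>

/* Hypothetical counter; ATOMIC64_INIT is needed so the embedded spinlock
 * is initialized when the spinlock-based compat implementation is in use. */
static atomic64_t bytes_inflight = ATOMIC64_INIT(0);

static void track_alloc(size_t size)
{
	atomic64_add(size, &bytes_inflight);
}

static void track_free(size_t size)
{
	atomic64_sub(size, &bytes_inflight);
}

static __s64 bytes_outstanding(void)
{
	return atomic64_read(&bytes_inflight);
}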
@@ -1,44 +0,0 @@
-/*
- *  This file is part of the SPL: Solaris Porting Layer.
- *
- *  Copyright (c) 2008 Sun Microsystems, Inc.
- *
- *  This is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This is distributed in the hope that it will be useful, but WITHOUT
- *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- *  for more details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#ifndef _SPL_DIV64_H
-#define _SPL_DIV64_H
-
-#include <asm/div64.h>
-
-#ifndef HAVE_DIV64_64
-#if BITS_PER_LONG == 32
-
-extern uint64_t spl_div64_64(uint64_t dividend, uint64_t divisor);
-#define div64_64(a,b) spl_div64_64(a,b)
-
-#else /* BITS_PER_LONG == 32 */
-
-static inline uint64_t div64_64(uint64_t dividend, uint64_t divisor)
-{
-	return dividend / divisor;
-}
-
-#endif /* BITS_PER_LONG == 32 */
-#endif /* HAVE_DIV64_64 */
-
-#define roundup64(x, y) (div64_64((x) + ((y) - 1), (y)) * (y))
-
-#endif /* _SPL_DIV64_H */
@@ -42,8 +42,10 @@ extern "C" {
 #include <linux/rwsem.h>
 #include <linux/hash.h>
 #include <linux/ctype.h>
+#include <asm/atomic_compat.h>
 #include <sys/types.h>
 #include <sys/debug.h>
+
 /*
  * Memory allocation interfaces
  */
@@ -88,12 +88,15 @@ highbit(unsigned long i)
 EXPORT_SYMBOL(highbit);
 
 /*
- * Implementation of div64_64(), for kernels that don't have it.
- *
- * Taken from a 2.6.24 kernel.
+ * Implementation of 64 bit division for 32-bit machines.
  */
-uint64_t spl_div64_64(uint64_t dividend, uint64_t divisor)
+#if BITS_PER_LONG == 32
+uint64_t __udivdi3(uint64_t dividend, uint64_t divisor)
 {
+#ifdef HAVE_DIV64_64
+	return div64_64(dividend, divisor);
+#else
+	/* Taken from a 2.6.24 kernel. */
 	uint32_t high, d;
 
 	high = divisor >> 32;
@@ -108,8 +111,19 @@ uint64_t spl_div64_64(uint64_t dividend, uint64_t divisor)
 	do_div(dividend, d);
 
 	return dividend;
+#endif
 }
-EXPORT_SYMBOL(spl_div64_64);
+EXPORT_SYMBOL(__udivdi3);
+
+/*
+ * Implementation of 64 bit modulo for 32-bit machines.
+ */
+uint64_t __umoddi3(uint64_t dividend, uint64_t divisor)
+{
+	return dividend - divisor * (dividend / divisor);
+}
+EXPORT_SYMBOL(__umoddi3);
+#endif
 
 int
 ddi_strtoul(const char *str, char **nptr, int base, unsigned long *result)
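With __udivdi3() and __umoddi3() defined and exported for BITS_PER_LONG == 32, the helper calls that the compiler generates for plain 64-bit division now resolve against the SPL instead of libgcc, so dependent modules such as the DMU can divide 64-bit values directly. A hedged illustration (the function below is not part of the patch):

#include <linux/types.h>

/* Illustrative only: on a 32-bit kernel both expressions compile into
 * calls to __udivdi3()/__umoddi3(), now satisfied by the exported SPL
 * symbols; on 64-bit kernels the operations are inlined directly. */
static uint64_t blocks_and_tail(uint64_t bytes, uint64_t blksz, uint64_t *tail)
{
	*tail = bytes % blksz;     /* __umoddi3() on 32-bit */
	return bytes / blksz;      /* __udivdi3() on 32-bit */
}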
@@ -43,9 +43,9 @@
  */
 #ifdef DEBUG_KMEM
 /* Shim layer memory accounting */
-atomic64_t kmem_alloc_used;
+atomic64_t kmem_alloc_used = ATOMIC64_INIT(0);
 unsigned long kmem_alloc_max = 0;
-atomic64_t vmem_alloc_used;
+atomic64_t vmem_alloc_used = ATOMIC64_INIT(0);
 unsigned long vmem_alloc_max = 0;
 int kmem_warning_flag = 1;
 
@@ -1031,12 +1031,12 @@ spl_kmem_fini(void)
 	 * a serious concern here since it is module unload time. */
 	if (atomic64_read(&kmem_alloc_used) != 0)
 		CWARN("kmem leaked %ld/%ld bytes\n",
-		      atomic_read(&kmem_alloc_used), kmem_alloc_max);
+		      atomic64_read(&kmem_alloc_used), kmem_alloc_max);
 
 
 	if (atomic64_read(&vmem_alloc_used) != 0)
 		CWARN("vmem leaked %ld/%ld bytes\n",
-		      atomic_read(&vmem_alloc_used), vmem_alloc_max);
+		      atomic64_read(&vmem_alloc_used), vmem_alloc_max);
 
 	spl_kmem_fini_tracking(&kmem_list, &kmem_lock);
 	spl_kmem_fini_tracking(&vmem_list, &vmem_lock);
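A brief hedged note on why these call sites change: atomic_read() expects a 32-bit atomic_t, so using it on an atomic64_t only happened to work on some configurations, and against the new spinlock-based compat type the expected field does not even exist; atomic64_read() matches the type and returns the full 64-bit count. A small sketch of the corrected pattern (counter and function names hypothetical):

#include <linux/kernel.h>
#include <asm/atomic_compat.h>

static atomic64_t demo_alloc_used = ATOMIC64_INIT(0);

static void report_leak(void)
{
	/* atomic64_read() returns the whole 64-bit value, whether the
	 * native atomic64_t or the compat implementation is in use. */
	if (atomic64_read(&demo_alloc_used) != 0)
		printk(KERN_WARNING "leaked %lld bytes\n",
		       (long long)atomic64_read(&demo_alloc_used));
}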
@@ -415,7 +415,7 @@ proc_doatomic64(struct ctl_table *table, int write, struct file *filp,
 	if (write) {
 		*ppos += *lenp;
 	} else {
-		val = atomic_read((atomic64_t *)table->data);
+		val = atomic64_read((atomic64_t *)table->data);
 		rc = proc_doulongvec_minmax(&dummy, write, filp,
 		                            buffer, lenp, ppos);
 	}