Fix kernel unaligned access on sparc64

Update the SA_COPY_DATA macro to check at compile time whether the
architecture supports efficient unaligned memory accesses.  Otherwise,
fall back to using the sa_copy_data() function.

The kernel-provided CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS option is
used to determine availability in kernel space.  In user space the
x86_64, x86, powerpc, and sometimes arm architectures define the
HAVE_EFFICIENT_UNALIGNED_ACCESS macro.

Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #7642 
Closes #7684
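
For illustration only (not part of this commit), a minimal sketch of the
compile-time dispatch pattern described above: when efficient unaligned
access is available the copy is a direct 64-bit load/store, otherwise a
byte-wise memcpy() keeps architectures such as sparc64, which fault on
unaligned loads, on the safe path.  The copy_u64() helper name is
hypothetical.

#include <stdint.h>
#include <string.h>

/* Map the kernel config symbol onto the shared macro name. */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \
    !defined(HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define HAVE_EFFICIENT_UNALIGNED_ACCESS
#endif

/* Copy one 64-bit value between possibly unaligned buffers. */
static inline void
copy_u64(void *t, const void *s)
{
#ifdef HAVE_EFFICIENT_UNALIGNED_ACCESS
	/* Fast path: the CPU tolerates unaligned loads and stores. */
	*(uint64_t *)t = *(const uint64_t *)s;
#else
	/* Safe path: no alignment assumptions are made. */
	memcpy(t, s, sizeof (uint64_t));
#endif
}
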
4 changed files with 36 additions and 16 deletions

@@ -210,6 +210,14 @@
#include <sys/byteorder.h>
/*
* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS will be defined by the Linux
* kernel for architectures which support efficient unaligned access.
*/
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define HAVE_EFFICIENT_UNALIGNED_ACCESS
#endif
#if defined(__LITTLE_ENDIAN) && !defined(_LITTLE_ENDIAN)
#define _LITTLE_ENDIAN __LITTLE_ENDIAN
#endif

@@ -55,6 +55,7 @@ extern "C" {
#endif
#define _SUNOS_VTOC_16
#define HAVE_EFFICIENT_UNALIGNED_ACCESS
/* i386 arch specific defines */
#elif defined(__i386) || defined(__i386__)
@@ -76,6 +77,7 @@ extern "C" {
#endif
#define _SUNOS_VTOC_16
#define HAVE_EFFICIENT_UNALIGNED_ACCESS
/* powerpc arch specific defines */
#elif defined(__powerpc) || defined(__powerpc__) || defined(__powerpc64__)
@@ -99,6 +101,7 @@ extern "C" {
#endif
#define _SUNOS_VTOC_16
#define HAVE_EFFICIENT_UNALIGNED_ACCESS
/* arm arch specific defines */
#elif defined(__arm) || defined(__arm__) || defined(__aarch64__)
@@ -129,6 +132,10 @@ extern "C" {
#define _SUNOS_VTOC_16
#if defined(__ARM_FEATURE_UNALIGNED)
#define HAVE_EFFICIENT_UNALIGNED_ACCESS
#endif
/* sparc arch specific defines */
#elif defined(__sparc) || defined(__sparc__)

@@ -28,7 +28,7 @@
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#if defined(__i386) || defined(__amd64)
#ifdef HAVE_EFFICIENT_UNALIGNED_ACCESS
#include <sys/byteorder.h>
#define UNALIGNED_POINTERS_PERMITTED
#endif
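
For context (an illustrative sketch, not taken from this diff), code
guarded by UNALIGNED_POINTERS_PERMITTED typically reads multi-byte words
directly through a pointer cast when the flag is defined, and assembles
them byte by byte otherwise.  The load_be32() helper below is
hypothetical.

#include <stdint.h>

/* Read a 32-bit big-endian word from a possibly unaligned buffer. */
static inline uint32_t
load_be32(const uint8_t *p)
{
#ifdef UNALIGNED_POINTERS_PERMITTED
	/* Direct load; byte swap on little-endian hosts. */
	uint32_t w = *(const uint32_t *)p;
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	w = __builtin_bswap32(w);
#endif
	return (w);
#else
	/* Byte-at-a-time assembly works for any alignment. */
	return (((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	    ((uint32_t)p[2] << 8) | (uint32_t)p[3]);
#endif
}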

@@ -150,8 +150,9 @@ arc_byteswap_func_t sa_bswap_table[] = {
zfs_acl_byteswap,
};
#ifdef HAVE_EFFICIENT_UNALIGNED_ACCESS
#define SA_COPY_DATA(f, s, t, l) \
{ \
do { \
if (f == NULL) { \
if (l == 8) { \
*(uint64_t *)t = *(uint64_t *)s; \
@@ -162,9 +163,13 @@ arc_byteswap_func_t sa_bswap_table[] = {
} else { \
bcopy(s, t, l); \
} \
} else \
} else { \
sa_copy_data(f, s, t, l); \
}
} \
} while (0)
#else
#define SA_COPY_DATA(f, s, t, l) sa_copy_data(f, s, t, l)
#endif
/*
* This table is fixed and cannot be changed. Its purpose is to