/* zfs/module/zcommon/zfs_fletcher_aarch64_neon.c */
/*
* Implement fast Fletcher4 with NEON instructions. (aarch64)
*
* Use the 128-bit NEON SIMD instructions and registers to compute
* Fletcher4 in two incremental 64-bit parallel accumulator streams,
* and then combine the streams to form the final four checksum words.
* This implementation is a derivative of the AVX SIMD implementation by
* James Guilford and Jinshan Xiong from Intel (see zfs_fletcher_intel.c).
*
* Copyright (C) 2016 Romain Dolbeau.
*
* Authors:
* Romain Dolbeau <romain.dolbeau@atos.net>
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#if defined(__aarch64__)
#include <sys/simd.h>
#include <sys/spa_checksum.h>
#include <sys/strings.h>
#include <zfs_fletcher.h>
/*
 * Reset the checksum context: clear each of the four 2x64-bit
 * parallel accumulator streams before a new computation starts.
 */
static void
fletcher_4_aarch64_neon_init(fletcher_4_ctx_t *ctx)
{
	int i;

	for (i = 0; i < 4; i++)
		bzero(&ctx->aarch64_neon[i],
		    sizeof (zfs_fletcher_aarch64_neon_t));
}
/*
 * Fold the two parallel 64-bit accumulator streams back into the four
 * scalar Fletcher4 checksum words.  The coefficients compensate for the
 * fact that each stream only saw every other 32-bit input word (this
 * mirrors the recombination used by the Intel AVX implementation).
 */
static void
fletcher_4_aarch64_neon_fini(fletcher_4_ctx_t *ctx, zio_cksum_t *zcp)
{
	const uint64_t *v0 = ctx->aarch64_neon[0].v;
	const uint64_t *v1 = ctx->aarch64_neon[1].v;
	const uint64_t *v2 = ctx->aarch64_neon[2].v;
	const uint64_t *v3 = ctx->aarch64_neon[3].v;
	uint64_t A, B, C, D;

	A = v0[0] + v0[1];
	B = 2 * v1[0] + 2 * v1[1] - v0[1];
	C = 4 * v2[0] - v1[0] + 4 * v2[1] - 3 * v1[1];
	D = 8 * v3[0] - 4 * v2[0] + 8 * v3[1] - 8 * v2[1] + v1[1];
	ZIO_SET_CHECKSUM(zcp, A, B, C, D);
}
/*
 * Load the four saved accumulator streams from the context into NEON
 * registers ACC0..ACC3 and zero the ZERO register.  The "Q" constraints
 * make each 16-byte context entry a direct memory operand of ld1.
 */
#define NEON_INIT_LOOP() \
asm("eor %[ZERO].16b,%[ZERO].16b,%[ZERO].16b\n" \
"ld1 { %[ACC0].4s }, %[CTX0]\n" \
"ld1 { %[ACC1].4s }, %[CTX1]\n" \
"ld1 { %[ACC2].4s }, %[CTX2]\n" \
"ld1 { %[ACC3].4s }, %[CTX3]\n" \
: [ZERO] "=w" (ZERO), \
[ACC0] "=w" (ACC0), [ACC1] "=w" (ACC1), \
[ACC2] "=w" (ACC2), [ACC3] "=w" (ACC3) \
: [CTX0] "Q" (ctx->aarch64_neon[0]), \
[CTX1] "Q" (ctx->aarch64_neon[1]), \
[CTX2] "Q" (ctx->aarch64_neon[2]), \
[CTX3] "Q" (ctx->aarch64_neon[3]))
/*
 * Optional byte-swap of the just-loaded source words: rev32 reverses
 * the bytes within each 32-bit element.  Spliced into NEON_MAIN_LOOP
 * for the byteswap variant; the native variant splices an empty string.
 */
#define NEON_DO_REVERSE "rev32 %[SRC].16b, %[SRC].16b\n"
#define NEON_DONT_REVERSE ""
/*
 * One iteration of the inner loop: load four 32-bit input words,
 * optionally byte-swap them, then widen to 64 bits by interleaving with
 * ZERO (zip1 takes the low pair, zip2 the high pair) and run the
 * Fletcher cascade ACC0 += word; ACC1 += ACC0; ACC2 += ACC1;
 * ACC3 += ACC2 twice -- once per 2x64-bit half.  "=&w" early-clobbers
 * keep SRC/TMP1/TMP2 from being allocated on top of the inputs.
 */
#define NEON_MAIN_LOOP(REVERSE) \
asm("ld1 { %[SRC].4s }, %[IP]\n" \
REVERSE \
"zip1 %[TMP1].4s, %[SRC].4s, %[ZERO].4s\n" \
"zip2 %[TMP2].4s, %[SRC].4s, %[ZERO].4s\n" \
"add %[ACC0].2d, %[ACC0].2d, %[TMP1].2d\n" \
"add %[ACC1].2d, %[ACC1].2d, %[ACC0].2d\n" \
"add %[ACC2].2d, %[ACC2].2d, %[ACC1].2d\n" \
"add %[ACC3].2d, %[ACC3].2d, %[ACC2].2d\n" \
"add %[ACC0].2d, %[ACC0].2d, %[TMP2].2d\n" \
"add %[ACC1].2d, %[ACC1].2d, %[ACC0].2d\n" \
"add %[ACC2].2d, %[ACC2].2d, %[ACC1].2d\n" \
"add %[ACC3].2d, %[ACC3].2d, %[ACC2].2d\n" \
: [SRC] "=&w" (SRC), \
[TMP1] "=&w" (TMP1), [TMP2] "=&w" (TMP2), \
[ACC0] "+w" (ACC0), [ACC1] "+w" (ACC1), \
[ACC2] "+w" (ACC2), [ACC3] "+w" (ACC3) \
: [ZERO] "w" (ZERO), [IP] "Q" (*ip))
/*
 * Store the four accumulator registers back into the context so the
 * computation can be resumed or finalized later.
 */
#define NEON_FINI_LOOP() \
asm("st1 { %[ACC0].4s },%[DST0]\n" \
"st1 { %[ACC1].4s },%[DST1]\n" \
"st1 { %[ACC2].4s },%[DST2]\n" \
"st1 { %[ACC3].4s },%[DST3]\n" \
: [DST0] "=Q" (ctx->aarch64_neon[0]), \
[DST1] "=Q" (ctx->aarch64_neon[1]), \
[DST2] "=Q" (ctx->aarch64_neon[2]), \
[DST3] "=Q" (ctx->aarch64_neon[3]) \
: [ACC0] "w" (ACC0), [ACC1] "w" (ACC1), \
[ACC2] "w" (ACC2), [ACC3] "w" (ACC3))
/*
 * Compute Fletcher4 over `size` bytes of native-endian data in `buf`,
 * accumulating into `ctx`.  `size` is assumed to be a multiple of 16
 * bytes (four 32-bit words per iteration) -- callers in the fletcher_4
 * framework guarantee this; TODO confirm against zfs_fletcher.c.
 */
static void
fletcher_4_aarch64_neon_native(fletcher_4_ctx_t *ctx,
const void *buf, uint64_t size)
{
const uint64_t *ip = buf;
const uint64_t *ipend = (uint64_t *)((uint8_t *)ip + size);
#if defined(_KERNEL)
/*
 * In-kernel builds pin each vector variable to a fixed NEON register
 * (v0-v7) so the compiler cannot spill them between the asm blocks
 * while the FPU-save window opened by kfpu_begin() is active.
 */
register unsigned char ZERO asm("v0") __attribute__((vector_size(16)));
register unsigned char ACC0 asm("v1") __attribute__((vector_size(16)));
register unsigned char ACC1 asm("v2") __attribute__((vector_size(16)));
register unsigned char ACC2 asm("v3") __attribute__((vector_size(16)));
register unsigned char ACC3 asm("v4") __attribute__((vector_size(16)));
register unsigned char TMP1 asm("v5") __attribute__((vector_size(16)));
register unsigned char TMP2 asm("v6") __attribute__((vector_size(16)));
register unsigned char SRC asm("v7") __attribute__((vector_size(16)));
#else
/* Userland: let the compiler choose the vector registers. */
unsigned char ZERO __attribute__((vector_size(16)));
unsigned char ACC0 __attribute__((vector_size(16)));
unsigned char ACC1 __attribute__((vector_size(16)));
unsigned char ACC2 __attribute__((vector_size(16)));
unsigned char ACC3 __attribute__((vector_size(16)));
unsigned char TMP1 __attribute__((vector_size(16)));
unsigned char TMP2 __attribute__((vector_size(16)));
unsigned char SRC __attribute__((vector_size(16)));
#endif
kfpu_begin();
NEON_INIT_LOOP();
/* Two 64-bit words (16 bytes / four input words) per iteration. */
for (; ip < ipend; ip += 2) {
NEON_MAIN_LOOP(NEON_DONT_REVERSE);
}
NEON_FINI_LOOP();
kfpu_end();
}
/*
 * Byte-swapped variant of fletcher_4_aarch64_neon_native(): identical
 * structure, but each 32-bit input word is byte-reversed (rev32) before
 * being accumulated, for checksumming opposite-endian data.
 */
static void
fletcher_4_aarch64_neon_byteswap(fletcher_4_ctx_t *ctx,
const void *buf, uint64_t size)
{
const uint64_t *ip = buf;
const uint64_t *ipend = (uint64_t *)((uint8_t *)ip + size);
#if defined(_KERNEL)
/* Pin vector variables to fixed registers; see native variant. */
register unsigned char ZERO asm("v0") __attribute__((vector_size(16)));
register unsigned char ACC0 asm("v1") __attribute__((vector_size(16)));
register unsigned char ACC1 asm("v2") __attribute__((vector_size(16)));
register unsigned char ACC2 asm("v3") __attribute__((vector_size(16)));
register unsigned char ACC3 asm("v4") __attribute__((vector_size(16)));
register unsigned char TMP1 asm("v5") __attribute__((vector_size(16)));
register unsigned char TMP2 asm("v6") __attribute__((vector_size(16)));
register unsigned char SRC asm("v7") __attribute__((vector_size(16)));
#else
unsigned char ZERO __attribute__((vector_size(16)));
unsigned char ACC0 __attribute__((vector_size(16)));
unsigned char ACC1 __attribute__((vector_size(16)));
unsigned char ACC2 __attribute__((vector_size(16)));
unsigned char ACC3 __attribute__((vector_size(16)));
unsigned char TMP1 __attribute__((vector_size(16)));
unsigned char TMP2 __attribute__((vector_size(16)));
unsigned char SRC __attribute__((vector_size(16)));
#endif
kfpu_begin();
NEON_INIT_LOOP();
for (; ip < ipend; ip += 2) {
NEON_MAIN_LOOP(NEON_DO_REVERSE);
}
NEON_FINI_LOOP();
kfpu_end();
}
/*
 * Report whether this implementation may be used in the current
 * context.  NEON itself is mandatory on aarch64; the only restriction
 * is whether kernel FPU usage is currently permitted.
 *
 * Fix: scraped VCS commit-log text had been spliced into the function
 * body, breaking compilation; restore the clean one-line body.
 */
static boolean_t
fletcher_4_aarch64_neon_valid(void)
{
	return (kfpu_allowed());
}
/*
 * Operations vector registered with the generic fletcher_4 framework.
 * Init and fini are endian-agnostic, so the same routines serve both
 * the native and byteswap paths; only the compute step differs.
 */
const fletcher_4_ops_t fletcher_4_aarch64_neon_ops = {
	.valid = fletcher_4_aarch64_neon_valid,
	.name = "aarch64_neon",
	.init_native = fletcher_4_aarch64_neon_init,
	.fini_native = fletcher_4_aarch64_neon_fini,
	.compute_native = fletcher_4_aarch64_neon_native,
	.init_byteswap = fletcher_4_aarch64_neon_init,
	.fini_byteswap = fletcher_4_aarch64_neon_fini,
	.compute_byteswap = fletcher_4_aarch64_neon_byteswap,
};
#endif /* defined(__aarch64__) */