From f288fdb4bd521f263277bcdc76cdec12a169a1e5 Mon Sep 17 00:00:00 2001
From: Rich Ercolani
Date: Tue, 5 Mar 2024 21:35:36 -0500
Subject: [PATCH] Avoid save/restoring AMX registers to avoid a SPR erratum

Intel SPR erratum SPR4 says that if you trip into a vmexit while
doing FPU save/restore, your AMX register state might misbehave...
and by misbehave, I mean save all zeroes incorrectly, leading to
explosions if you restore it.

Since we're not using AMX for anything, the simple way to avoid
this is to just not save/restore those when we do anything, since
we're killing preemption of any sort across our save/restores.

If we ever decide to use AMX, it's not clear that we have any way
to mitigate this, on Linux...but I am not an expert.

Reviewed-by: Brian Behlendorf
Signed-off-by: Rich Ercolani
Closes #14989
Closes #15168
Signed-off-by: Rich Ercolani
---
 include/os/linux/kernel/linux/simd_x86.h | 23 ++++++++++++++++-------
 1 file changed, 16 insertions(+), 7 deletions(-)

diff --git a/include/os/linux/kernel/linux/simd_x86.h b/include/os/linux/kernel/linux/simd_x86.h
index 660f0d42de..38511fa5ad 100644
--- a/include/os/linux/kernel/linux/simd_x86.h
+++ b/include/os/linux/kernel/linux/simd_x86.h
@@ -157,6 +157,15 @@
 #endif
 #endif
 
+#ifndef XFEATURE_MASK_XTILE
+/*
+ * For kernels where this doesn't exist yet, we still don't want to break
+ * by save/restoring this broken nonsense.
+ * See issue #14989 or Intel errata SPR4 for why
+ */
+#define	XFEATURE_MASK_XTILE	0x60000
+#endif
+
 #include <linux/mm.h>
 #include <linux/slab.h>
 
@@ -290,7 +299,7 @@ kfpu_begin(void)
 	 */
 	union fpregs_state *state = zfs_kfpu_fpregs[smp_processor_id()];
 	if (static_cpu_has(X86_FEATURE_XSAVE)) {
-		kfpu_save_xsave(&state->xsave, ~0);
+		kfpu_save_xsave(&state->xsave, ~XFEATURE_MASK_XTILE);
 	} else if (static_cpu_has(X86_FEATURE_FXSR)) {
 		kfpu_save_fxsr(&state->fxsave);
 	} else {
@@ -319,18 +328,18 @@ kfpu_begin(void)
 	union fpregs_state *state = zfs_kfpu_fpregs[smp_processor_id()];
 #if defined(HAVE_XSAVES)
 	if (static_cpu_has(X86_FEATURE_XSAVES)) {
-		kfpu_do_xsave("xsaves", &state->xsave, ~0);
+		kfpu_do_xsave("xsaves", &state->xsave, ~XFEATURE_MASK_XTILE);
 		return;
 	}
 #endif
 #if defined(HAVE_XSAVEOPT)
 	if (static_cpu_has(X86_FEATURE_XSAVEOPT)) {
-		kfpu_do_xsave("xsaveopt", &state->xsave, ~0);
+		kfpu_do_xsave("xsaveopt", &state->xsave, ~XFEATURE_MASK_XTILE);
 		return;
 	}
 #endif
 	if (static_cpu_has(X86_FEATURE_XSAVE)) {
-		kfpu_do_xsave("xsave", &state->xsave, ~0);
+		kfpu_do_xsave("xsave", &state->xsave, ~XFEATURE_MASK_XTILE);
 	} else if (static_cpu_has(X86_FEATURE_FXSR)) {
 		kfpu_save_fxsr(&state->fxsave);
 	} else {
@@ -396,7 +405,7 @@ kfpu_end(void)
 	union fpregs_state *state = zfs_kfpu_fpregs[smp_processor_id()];
 
 	if (static_cpu_has(X86_FEATURE_XSAVE)) {
-		kfpu_restore_xsave(&state->xsave, ~0);
+		kfpu_restore_xsave(&state->xsave, ~XFEATURE_MASK_XTILE);
 	} else if (static_cpu_has(X86_FEATURE_FXSR)) {
 		kfpu_restore_fxsr(&state->fxsave);
 	} else {
@@ -415,12 +424,12 @@ kfpu_end(void)
 	union fpregs_state *state = zfs_kfpu_fpregs[smp_processor_id()];
 #if defined(HAVE_XSAVES)
 	if (static_cpu_has(X86_FEATURE_XSAVES)) {
-		kfpu_do_xrstor("xrstors", &state->xsave, ~0);
+		kfpu_do_xrstor("xrstors", &state->xsave, ~XFEATURE_MASK_XTILE);
 		goto out;
 	}
 #endif
 	if (static_cpu_has(X86_FEATURE_XSAVE)) {
-		kfpu_do_xrstor("xrstor", &state->xsave, ~0);
+		kfpu_do_xrstor("xrstor", &state->xsave, ~XFEATURE_MASK_XTILE);
 	} else if (static_cpu_has(X86_FEATURE_FXSR)) {
 		kfpu_restore_fxsr(&state->fxsave);
 	} else {