Prevent segfaults in SSE optimized Fletcher-4
In some cases, the compiler was not respecting the GNU aligned
attribute for stack variables in commit 35a76a0. This was resulting in
a segfault on CentOS 6.7 hosts using gcc 4.4.7-17. This issue
was fixed in gcc 4.6.
To prevent this from occurring, use unaligned loads and stores
for all stack and global memory references in the SSE optimized
Fletcher-4 code.
Disable zimport testing against master where this flaw exists:
TEST_ZIMPORT_VERSIONS="installed"
Signed-off-by: Tyler J. Stachecki <stachecki.tyler@gmail.com>
Signed-off-by: Gvozden Neskovic <neskovic@gmail.com>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #4862
This commit is contained in:
parent
1b87e0f532
commit
3d11ecbddd
|
@ -69,12 +69,12 @@ fletcher_4_sse2_fini(zio_cksum_t *zcp)
|
||||||
struct zfs_fletcher_sse_array a, b, c, d;
|
struct zfs_fletcher_sse_array a, b, c, d;
|
||||||
uint64_t A, B, C, D;
|
uint64_t A, B, C, D;
|
||||||
|
|
||||||
asm volatile("movdqa %%xmm0, %0":"=m" (a.v));
|
asm volatile("movdqu %%xmm0, %0":"=m" (a.v));
|
||||||
asm volatile("movdqa %%xmm1, %0":"=m" (b.v));
|
asm volatile("movdqu %%xmm1, %0":"=m" (b.v));
|
||||||
asm volatile("psllq $0x2, %xmm2");
|
asm volatile("psllq $0x2, %xmm2");
|
||||||
asm volatile("movdqa %%xmm2, %0":"=m" (c.v));
|
asm volatile("movdqu %%xmm2, %0":"=m" (c.v));
|
||||||
asm volatile("psllq $0x3, %xmm3");
|
asm volatile("psllq $0x3, %xmm3");
|
||||||
asm volatile("movdqa %%xmm3, %0":"=m" (d.v));
|
asm volatile("movdqu %%xmm3, %0":"=m" (d.v));
|
||||||
|
|
||||||
kfpu_end();
|
kfpu_end();
|
||||||
|
|
||||||
|
@ -168,7 +168,7 @@ fletcher_4_ssse3_byteswap(const void *buf, uint64_t size, zio_cksum_t *unused)
|
||||||
const uint64_t *ip = buf;
|
const uint64_t *ip = buf;
|
||||||
const uint64_t *ipend = (uint64_t *)((uint8_t *)ip + size);
|
const uint64_t *ipend = (uint64_t *)((uint8_t *)ip + size);
|
||||||
|
|
||||||
asm volatile("movdqa %0, %%xmm7"::"m" (mask));
|
asm volatile("movdqu %0, %%xmm7"::"m" (mask));
|
||||||
asm volatile("pxor %xmm4, %xmm4");
|
asm volatile("pxor %xmm4, %xmm4");
|
||||||
|
|
||||||
for (; ip < ipend; ip += 2) {
|
for (; ip < ipend; ip += 2) {
|
||||||
|
|
Loading…
Reference in New Issue