From 037e4f25367aaefb68f3c3a67e4c737de420727f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Attila=20F=C3=BCl=C3=B6p?=
Date: Mon, 23 Jan 2023 20:25:21 +0100
Subject: [PATCH] x86 asm: Replace .align with .balign
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The .align directive used to align storage locations is ambiguous.
On some platforms and assemblers it takes a byte count, on others
the argument is interpreted as a shift value. The current usage
expects the first interpretation. Replace it with the unambiguous
.balign directive which always expects a byte count, regardless of
platform and assembler.

Reviewed-by: Jorgen Lundman
Reviewed-by: Tino Reichardt
Reviewed-by: Richard Yao
Signed-off-by: Attila Fülöp
Closes #14422
---
 include/os/freebsd/spl/sys/ia32/asm_linkage.h | 10 +++----
 include/os/linux/spl/sys/ia32/asm_linkage.h   | 10 +++----
 .../include/os/freebsd/sys/ia32/asm_linkage.h | 10 +++----
 .../include/os/linux/sys/ia32/asm_linkage.h   | 10 +++----
 module/icp/asm-x86_64/aes/aes_aesni.S         | 16 ++++++------
 module/icp/asm-x86_64/aes/aes_amd64.S         |  4 +--
 .../icp/asm-x86_64/modes/aesni-gcm-x86_64.S   | 26 +++++++++----------
 module/icp/asm-x86_64/modes/gcm_pclmulqdq.S   |  2 +-
 module/icp/asm-x86_64/modes/ghash-x86_64.S    | 14 +++++-----
 module/icp/asm-x86_64/sha2/sha256_impl.S      |  6 ++---
 module/icp/asm-x86_64/sha2/sha512_impl.S      |  6 ++---
 module/lua/setjmp/setjmp_aarch64.S            |  2 +-
 module/lua/setjmp/setjmp_arm.S                |  2 +-
 module/lua/setjmp/setjmp_i386.S               |  2 +-
 module/lua/setjmp/setjmp_ppc.S                |  4 +--
 module/lua/setjmp/setjmp_sparc64.S            |  2 +-
 16 files changed, 63 insertions(+), 63 deletions(-)

diff --git a/include/os/freebsd/spl/sys/ia32/asm_linkage.h b/include/os/freebsd/spl/sys/ia32/asm_linkage.h
index bbbd220302..058d600007 100644
--- a/include/os/freebsd/spl/sys/ia32/asm_linkage.h
+++ b/include/os/freebsd/spl/sys/ia32/asm_linkage.h
@@ -127,19 +127,19 @@ extern "C" {
  */
 #define	ENTRY(x) \
 	.text; \
-	.align	ASM_ENTRY_ALIGN; \
+	.balign	ASM_ENTRY_ALIGN; \
 	.globl	x; \
 x:	MCOUNT(x)
 
 #define	ENTRY_NP(x) \
 	.text; \
-	.align	ASM_ENTRY_ALIGN; \
+	.balign	ASM_ENTRY_ALIGN; \
 	.globl	x; \
 x:
 
 #define	ENTRY_ALIGN(x, a) \
 	.text; \
-	.align	a; \
+	.balign	a; \
 	.globl	x; \
 x:
 
@@ -148,14 +148,14 @@ x:
  */
 #define	ENTRY2(x, y) \
 	.text; \
-	.align	ASM_ENTRY_ALIGN; \
+	.balign	ASM_ENTRY_ALIGN; \
 	.globl	x, y; \
 x:; \
 y:	MCOUNT(x)
 
 #define	ENTRY_NP2(x, y) \
 	.text; \
-	.align	ASM_ENTRY_ALIGN; \
+	.balign	ASM_ENTRY_ALIGN; \
 	.globl	x, y; \
 x:; \
 y:
diff --git a/include/os/linux/spl/sys/ia32/asm_linkage.h b/include/os/linux/spl/sys/ia32/asm_linkage.h
index 2864d94551..3aaa4af5da 100644
--- a/include/os/linux/spl/sys/ia32/asm_linkage.h
+++ b/include/os/linux/spl/sys/ia32/asm_linkage.h
@@ -149,21 +149,21 @@ extern "C" {
 #undef ENTRY
 #define	ENTRY(x) \
 	.text; \
-	.align	ASM_ENTRY_ALIGN; \
+	.balign	ASM_ENTRY_ALIGN; \
 	.globl	x; \
 	.type	x, @function; \
 x:	MCOUNT(x)
 
 #define	ENTRY_NP(x) \
 	.text; \
-	.align	ASM_ENTRY_ALIGN; \
+	.balign	ASM_ENTRY_ALIGN; \
 	.globl	x; \
 	.type	x, @function; \
 x:
 
 #define	ENTRY_ALIGN(x, a) \
 	.text; \
-	.align	a; \
+	.balign	a; \
 	.globl	x; \
 	.type	x, @function; \
 x:
 
@@ -177,7 +177,7 @@ x:
  */
 #define	ENTRY2(x, y) \
 	.text; \
-	.align	ASM_ENTRY_ALIGN; \
+	.balign	ASM_ENTRY_ALIGN; \
 	.globl	x, y; \
 	.type	x, @function; \
 	.type	y, @function; \
@@ -186,7 +186,7 @@ y:	MCOUNT(x)
 
 #define	ENTRY_NP2(x, y) \
 	.text; \
-	.align	ASM_ENTRY_ALIGN; \
+	.balign	ASM_ENTRY_ALIGN; \
 	.globl	x, y; \
 	.type	x, @function; \
 	.type	y, @function; \
diff --git a/lib/libspl/include/os/freebsd/sys/ia32/asm_linkage.h b/lib/libspl/include/os/freebsd/sys/ia32/asm_linkage.h
index 3b4beecc5d..9964f183cc 100644
--- a/lib/libspl/include/os/freebsd/sys/ia32/asm_linkage.h
+++ b/lib/libspl/include/os/freebsd/sys/ia32/asm_linkage.h
@@ -130,19 +130,19 @@ extern "C" {
  */
 #define	ENTRY(x) \
 	.text; \
-	.align	ASM_ENTRY_ALIGN; \
+	.balign	ASM_ENTRY_ALIGN; \
 	.globl	x; \
 x:	MCOUNT(x)
 
 #define	ENTRY_NP(x) \
 	.text; \
-	.align	ASM_ENTRY_ALIGN; \
+	.balign	ASM_ENTRY_ALIGN; \
 	.globl	x; \
 x:
 
 #define	ENTRY_ALIGN(x, a) \
 	.text; \
-	.align	a; \
+	.balign	a; \
 	.globl	x; \
 x:
 
@@ -155,14 +155,14 @@ x:
  */
 #define	ENTRY2(x, y) \
 	.text; \
-	.align	ASM_ENTRY_ALIGN; \
+	.balign	ASM_ENTRY_ALIGN; \
 	.globl	x, y; \
 x:; \
 y:	MCOUNT(x)
 
 #define	ENTRY_NP2(x, y) \
 	.text; \
-	.align	ASM_ENTRY_ALIGN; \
+	.balign	ASM_ENTRY_ALIGN; \
 	.globl	x, y; \
 x:; \
 y:
diff --git a/lib/libspl/include/os/linux/sys/ia32/asm_linkage.h b/lib/libspl/include/os/linux/sys/ia32/asm_linkage.h
index 76765dd040..f075961233 100644
--- a/lib/libspl/include/os/linux/sys/ia32/asm_linkage.h
+++ b/lib/libspl/include/os/linux/sys/ia32/asm_linkage.h
@@ -149,21 +149,21 @@ extern "C" {
 #undef ENTRY
 #define	ENTRY(x) \
 	.text; \
-	.align	ASM_ENTRY_ALIGN; \
+	.balign	ASM_ENTRY_ALIGN; \
 	.globl	x; \
 	.type	x, @function; \
 x:	MCOUNT(x)
 
 #define	ENTRY_NP(x) \
 	.text; \
-	.align	ASM_ENTRY_ALIGN; \
+	.balign	ASM_ENTRY_ALIGN; \
 	.globl	x; \
 	.type	x, @function; \
 x:
 
 #define	ENTRY_ALIGN(x, a) \
 	.text; \
-	.align	a; \
+	.balign	a; \
 	.globl	x; \
 	.type	x, @function; \
 x:
 
@@ -177,7 +177,7 @@ x:
  */
 #define	ENTRY2(x, y) \
 	.text; \
-	.align	ASM_ENTRY_ALIGN; \
+	.balign	ASM_ENTRY_ALIGN; \
 	.globl	x, y; \
 	.type	x, @function; \
 	.type	y, @function; \
@@ -186,7 +186,7 @@ y:	MCOUNT(x)
 
 #define	ENTRY_NP2(x, y) \
 	.text; \
-	.align	ASM_ENTRY_ALIGN; \
+	.balign	ASM_ENTRY_ALIGN; \
 	.globl	x, y; \
 	.type	x, @function; \
 	.type	y, @function; \
diff --git a/module/icp/asm-x86_64/aes/aes_aesni.S b/module/icp/asm-x86_64/aes/aes_aesni.S
index f622235bd1..4f3fe3ec65 100644
--- a/module/icp/asm-x86_64/aes/aes_aesni.S
+++ b/module/icp/asm-x86_64/aes/aes_aesni.S
@@ -378,7 +378,7 @@ rijndael_key_setup_enc_intel_local:
 	FRAME_END
 	RET
 
-.align 4
+.balign 4
 .Lenc_key192:
 	cmp	$192, %KEYSIZE32
 	jnz	.Lenc_key128
@@ -415,7 +415,7 @@ rijndael_key_setup_enc_intel_local:
 	FRAME_END
 	RET
 
-.align 4
+.balign 4
 .Lenc_key128:
 	cmp	$128, %KEYSIZE32
 	jnz	.Lenc_key_invalid_key_bits
@@ -522,7 +522,7 @@ FRAME_BEGIN
 	add	%AESKEY, %ROUNDS64
 	mov	%ROUNDS64, %ENDAESKEY
 
-.align 4
+.balign 4
 .Ldec_key_reorder_loop:
 	movups	(%AESKEY), %xmm0
 	movups	(%ROUNDS64), %xmm1
@@ -533,7 +533,7 @@ FRAME_BEGIN
 	cmp	%AESKEY, %ROUNDS64
 	ja	.Ldec_key_reorder_loop
 
-.align 4
+.balign 4
 .Ldec_key_inv_loop:
 	movups	(%rcx), %xmm0
 	// Convert an encryption round key to a form usable for decryption
@@ -622,7 +622,7 @@ ENTRY_NP(aes_encrypt_intel)
 	movups	-0x50(%KEYP), %KEY
 	aesenc	%KEY, %STATE
 
-.align 4
+.balign 4
 .Lenc192:
 	// AES 192 and 256
 	movups	-0x40(%KEYP), %KEY
@@ -630,7 +630,7 @@ ENTRY_NP(aes_encrypt_intel)
 	movups	-0x30(%KEYP), %KEY
 	aesenc	%KEY, %STATE
 
-.align 4
+.balign 4
 .Lenc128:
 	// AES 128, 192, and 256
 	movups	-0x20(%KEYP), %KEY
@@ -705,7 +705,7 @@ ENTRY_NP(aes_decrypt_intel)
 	movups	-0x50(%KEYP), %KEY
 	aesdec	%KEY, %STATE
 
-.align 4
+.balign 4
 .Ldec192:
 	// AES 192 and 256
 	movups	-0x40(%KEYP), %KEY
@@ -713,7 +713,7 @@ ENTRY_NP(aes_decrypt_intel)
 	movups	-0x30(%KEYP), %KEY
 	aesdec	%KEY, %STATE
 
-.align 4
+.balign 4
 .Ldec128:
 	// AES 128, 192, and 256
 	movups	-0x20(%KEYP), %KEY
diff --git a/module/icp/asm-x86_64/aes/aes_amd64.S b/module/icp/asm-x86_64/aes/aes_amd64.S
index d5cf4040fb..c4870a28ea 100644
--- a/module/icp/asm-x86_64/aes/aes_amd64.S
+++ b/module/icp/asm-x86_64/aes/aes_amd64.S
@@ -694,7 +694,7 @@ aes_decrypt_amd64(const uint32_t rk[], int Nr, const uint32_t ct[4],
  *	unsigned char *out, const aes_encrypt_ctx cx[1])/
  */
 SECTION_STATIC
-.align	64
+.balign	64
 enc_tab:
 	enc_vals(u8)
 #ifdef LAST_ROUND_TABLES
@@ -800,7 +800,7 @@ ENTRY_NP(aes_encrypt_amd64)
  *	unsigned char *out, const aes_encrypt_ctx cx[1])/
  */
 SECTION_STATIC
-.align	64
+.balign	64
 dec_tab:
 	dec_vals(v8)
 #ifdef LAST_ROUND_TABLES
diff --git a/module/icp/asm-x86_64/modes/aesni-gcm-x86_64.S b/module/icp/asm-x86_64/modes/aesni-gcm-x86_64.S
index 75dd2c721f..165492a0ed 100644
--- a/module/icp/asm-x86_64/modes/aesni-gcm-x86_64.S
+++ b/module/icp/asm-x86_64/modes/aesni-gcm-x86_64.S
@@ -58,7 +58,7 @@
 .text
 
 #ifdef HAVE_MOVBE
-.align	32
+.balign	32
 FUNCTION(_aesni_ctr32_ghash_6x)
 .cfi_startproc
 	ENDBR
@@ -75,7 +75,7 @@ FUNCTION(_aesni_ctr32_ghash_6x)
 	vmovdqu	%xmm4,16+8(%rsp)
 	jmp	.Loop6x
 
-.align	32
+.balign	32
 .Loop6x:
 	addl	$100663296,%ebx
 	jc	.Lhandle_ctr32
@@ -287,7 +287,7 @@ FUNCTION(_aesni_ctr32_ghash_6x)
 	vmovups	224-128(%rcx),%xmm1
 	jmp	.Lenc_tail
 
-.align	32
+.balign	32
 .Lhandle_ctr32:
 	vmovdqu	(%r11),%xmm0
 	vpshufb	%xmm0,%xmm1,%xmm6
@@ -309,7 +309,7 @@ FUNCTION(_aesni_ctr32_ghash_6x)
 	vpshufb	%xmm0,%xmm1,%xmm1
 	jmp	.Lresume_ctr32
 
-.align	32
+.balign	32
 .Lenc_tail:
 	vaesenc	%xmm15,%xmm9,%xmm9
 	vmovdqu	%xmm7,16+8(%rsp)
@@ -374,7 +374,7 @@ FUNCTION(_aesni_ctr32_ghash_6x)
 SET_SIZE(_aesni_ctr32_ghash_6x)
 #endif /* ifdef HAVE_MOVBE */
 
-.align	32
+.balign	32
 FUNCTION(_aesni_ctr32_ghash_no_movbe_6x)
 .cfi_startproc
 	ENDBR
@@ -391,7 +391,7 @@ FUNCTION(_aesni_ctr32_ghash_no_movbe_6x)
 	vmovdqu	%xmm4,16+8(%rsp)
 	jmp	.Loop6x_nmb
 
-.align	32
+.balign	32
 .Loop6x_nmb:
 	addl	$100663296,%ebx
 	jc	.Lhandle_ctr32_nmb
@@ -615,7 +615,7 @@ FUNCTION(_aesni_ctr32_ghash_no_movbe_6x)
 	vmovups	224-128(%rcx),%xmm1
 	jmp	.Lenc_tail_nmb
 
-.align	32
+.balign	32
 .Lhandle_ctr32_nmb:
 	vmovdqu	(%r11),%xmm0
 	vpshufb	%xmm0,%xmm1,%xmm6
@@ -637,7 +637,7 @@ FUNCTION(_aesni_ctr32_ghash_no_movbe_6x)
 	vpshufb	%xmm0,%xmm1,%xmm1
 	jmp	.Lresume_ctr32_nmb
 
-.align	32
+.balign	32
 .Lenc_tail_nmb:
 	vaesenc	%xmm15,%xmm9,%xmm9
 	vmovdqu	%xmm7,16+8(%rsp)
@@ -818,7 +818,7 @@ ENTRY_ALIGN(aesni_gcm_decrypt, 32)
 .cfi_endproc
 SET_SIZE(aesni_gcm_decrypt)
 
-.align	32
+.balign	32
 FUNCTION(_aesni_ctr32_6x)
 .cfi_startproc
 	ENDBR
@@ -843,7 +843,7 @@ FUNCTION(_aesni_ctr32_6x)
 	vpxor	%xmm4,%xmm14,%xmm14
 	jmp	.Loop_ctr32
 
-.align	16
+.balign	16
 .Loop_ctr32:
 	vaesenc	%xmm15,%xmm9,%xmm9
 	vaesenc	%xmm15,%xmm10,%xmm10
@@ -886,7 +886,7 @@ FUNCTION(_aesni_ctr32_6x)
 	leaq	96(%rsi),%rsi
 	RET
 
-.align	32
+.balign	32
 .Lhandle_ctr32_2:
 	vpshufb	%xmm0,%xmm1,%xmm6
 	vmovdqu	48(%r11),%xmm5
@@ -1237,7 +1237,7 @@ SET_SIZE(atomic_toggle_boolean_nv)
 
 SECTION_STATIC
 
-.align	64
+.balign	64
 .Lbswap_mask:
 	.byte	15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
 .Lpoly:
@@ -1249,7 +1249,7 @@ SECTION_STATIC
 .Lone_lsb:
 	.byte	1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
 	.byte	65,69,83,45,78,73,32,71,67,77,32,109,111,100,117,108,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
-.align	64
+.balign	64
 
 /* Mark the stack non-executable. */
 #if defined(__linux__) && defined(__ELF__)
diff --git a/module/icp/asm-x86_64/modes/gcm_pclmulqdq.S b/module/icp/asm-x86_64/modes/gcm_pclmulqdq.S
index eb9514e10c..e40b3df327 100644
--- a/module/icp/asm-x86_64/modes/gcm_pclmulqdq.S
+++ b/module/icp/asm-x86_64/modes/gcm_pclmulqdq.S
@@ -102,7 +102,7 @@ gcm_mul_pclmulqdq(uint64_t *x_in, uint64_t *y, uint64_t *res) {
 // static uint8_t byte_swap16_mask[] = {
 //	15, 14, 13, 12, 11, 10, 9, 8, 7, 6 ,5, 4, 3, 2, 1, 0 };
 .section .rodata
-.align XMM_ALIGN
+.balign XMM_ALIGN
 .Lbyte_swap16_mask:
 	.byte	15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
 
diff --git a/module/icp/asm-x86_64/modes/ghash-x86_64.S b/module/icp/asm-x86_64/modes/ghash-x86_64.S
index d48b4f2155..f62e056d4b 100644
--- a/module/icp/asm-x86_64/modes/ghash-x86_64.S
+++ b/module/icp/asm-x86_64/modes/ghash-x86_64.S
@@ -188,7 +188,7 @@ ENTRY_ALIGN(gcm_init_htab_avx, 32)
 	vpxor	%xmm2,%xmm6,%xmm6
 	movq	$4,%r10
 	jmp	.Linit_start_avx
-.align	32
+.balign	32
 .Linit_loop_avx:
 	vpalignr	$8,%xmm3,%xmm4,%xmm5
 	vmovdqu	%xmm5,-16(%rdi)
@@ -386,7 +386,7 @@ ENTRY_ALIGN(gcm_ghash_avx, 32)
 	subq	$0x80,%rcx
 	jmp	.Loop8x_avx
 
-.align	32
+.balign	32
 .Loop8x_avx:
 	vpunpckhqdq	%xmm15,%xmm15,%xmm8
 	vmovdqu	112(%rdx),%xmm14
@@ -506,7 +506,7 @@ ENTRY_ALIGN(gcm_ghash_avx, 32)
 	addq	$0x80,%rcx
 	jmp	.Ltail_no_xor_avx
 
-.align	32
+.balign	32
 .Lshort_avx:
 	vmovdqu	-16(%rdx,%rcx,1),%xmm14
 	leaq	(%rdx,%rcx,1),%rdx
@@ -610,7 +610,7 @@ ENTRY_ALIGN(gcm_ghash_avx, 32)
 	subq	$0x10,%rcx
 	jmp	.Ltail_avx
 
-.align	32
+.balign	32
 .Ltail_avx:
 	vpxor	%xmm10,%xmm15,%xmm15
 .Ltail_no_xor_avx:
@@ -658,7 +658,7 @@ SET_SIZE(gcm_ghash_avx)
 #endif /* !_WIN32 || _KERNEL */
 
 SECTION_STATIC
-.align	64
+.balign	64
 .Lbswap_mask:
 	.byte	15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
 .L0x1c2_polynomial:
@@ -667,7 +667,7 @@ SECTION_STATIC
 	.long	7,0,7,0
 .L7_mask_poly:
 	.long	7,0,450,0
-.align	64
+.balign	64
 SET_OBJ(.Lrem_4bit)
 .Lrem_4bit:
 	.long	0,0,0,471859200,0,943718400,0,610271232
@@ -710,7 +710,7 @@ SET_OBJ(.Lrem_8bit)
 	.value	0xBBF0,0xBA32,0xB874,0xB9B6,0xBCF8,0xBD3A,0xBF7C,0xBEBE
 
 	.byte	71,72,65,83,72,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
-.align	64
+.balign	64
 
 /* Mark the stack non-executable. */
 #if defined(__linux__) && defined(__ELF__)
diff --git a/module/icp/asm-x86_64/sha2/sha256_impl.S b/module/icp/asm-x86_64/sha2/sha256_impl.S
index 321d5da461..f3d7015284 100644
--- a/module/icp/asm-x86_64/sha2/sha256_impl.S
+++ b/module/icp/asm-x86_64/sha2/sha256_impl.S
@@ -133,7 +133,7 @@ ENTRY_NP(SHA256TransformBlocks)
 	mov	4*7(%rdi),%r11d
 	jmp	.Lloop
 
-.align	16
+.balign	16
 .Lloop:
 	xor	%rdi,%rdi
 	mov	4*0(%rsi),%r12d
@@ -873,7 +873,7 @@ ENTRY_NP(SHA256TransformBlocks)
 	add	%r14d,%eax	# h+=Maj(a,b,c)
 	jmp	.Lrounds_16_xx
 
-.align	16
+.balign	16
 .Lrounds_16_xx:
 	mov	4(%rsp),%r13d
 	mov	56(%rsp),%r12d
@@ -2064,7 +2064,7 @@ ENTRY_NP(SHA256TransformBlocks)
 SET_SIZE(SHA256TransformBlocks)
 
 .section .rodata
-.align	64
+.balign	64
 SET_OBJ(K256)
 K256:
 	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
diff --git a/module/icp/asm-x86_64/sha2/sha512_impl.S b/module/icp/asm-x86_64/sha2/sha512_impl.S
index 180f8e3660..520f5b6dab 100644
--- a/module/icp/asm-x86_64/sha2/sha512_impl.S
+++ b/module/icp/asm-x86_64/sha2/sha512_impl.S
@@ -134,7 +134,7 @@ ENTRY_NP(SHA512TransformBlocks)
 	mov	8*7(%rdi),%r11
 	jmp	.Lloop
 
-.align	16
+.balign	16
 .Lloop:
 	xor	%rdi,%rdi
 	mov	8*0(%rsi),%r12
@@ -874,7 +874,7 @@ ENTRY_NP(SHA512TransformBlocks)
 	add	%r14,%rax	# h+=Maj(a,b,c)
 	jmp	.Lrounds_16_xx
 
-.align	16
+.balign	16
 .Lrounds_16_xx:
 	mov	8(%rsp),%r13
 	mov	112(%rsp),%r12
@@ -2065,7 +2065,7 @@ ENTRY_NP(SHA512TransformBlocks)
 SET_SIZE(SHA512TransformBlocks)
 
 .section .rodata
-.align	64
+.balign	64
 SET_OBJ(K512)
 K512:
 	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
diff --git a/module/lua/setjmp/setjmp_aarch64.S b/module/lua/setjmp/setjmp_aarch64.S
index a5a9a85fd5..040ef1821a 100644
--- a/module/lua/setjmp/setjmp_aarch64.S
+++ b/module/lua/setjmp/setjmp_aarch64.S
@@ -35,7 +35,7 @@
 #define	ENTRY(sym) \
 	.text; \
 	.globl	sym; \
-	.align	2; \
+	.balign	2; \
 	.type	sym,#function; \
 sym:
 
diff --git a/module/lua/setjmp/setjmp_arm.S b/module/lua/setjmp/setjmp_arm.S
index 78bc3e0b34..0b18a96282 100644
--- a/module/lua/setjmp/setjmp_arm.S
+++ b/module/lua/setjmp/setjmp_arm.S
@@ -40,7 +40,7 @@
 #define	ENTRY(x) \
 	.text; \
 	.syntax unified; \
-	.align	2; \
+	.balign	2; \
 	.global	x; \
 	.type	x,#function; \
 	_FUNC_MODE; \
diff --git a/module/lua/setjmp/setjmp_i386.S b/module/lua/setjmp/setjmp_i386.S
index 0d0adfc351..87f9cb08c2 100644
--- a/module/lua/setjmp/setjmp_i386.S
+++ b/module/lua/setjmp/setjmp_i386.S
@@ -25,7 +25,7 @@
 
 #define	ENTRY(x) \
 	.text; \
-	.align	8; \
+	.balign	8; \
 	.globl	x; \
 	.type	x, @function; \
 x:
diff --git a/module/lua/setjmp/setjmp_ppc.S b/module/lua/setjmp/setjmp_ppc.S
index 72aa5d5ab5..a035cd11b3 100644
--- a/module/lua/setjmp/setjmp_ppc.S
+++ b/module/lua/setjmp/setjmp_ppc.S
@@ -54,7 +54,7 @@
 
 #ifdef PPC64_ELF_ABI_v2
 #define	ENTRY(name) \
-	.align 2 ; \
+	.balign 2 ; \
 	.type	name,@function; \
 	.weak	name; \
 name:
@@ -64,7 +64,7 @@ name:
 #define	XGLUE(a,b) a##b
 #define	GLUE(a,b) XGLUE(a,b)
 #define	ENTRY(name) \
-	.align 2 ; \
+	.balign 2 ; \
 	.weak	name; \
 	.weak	GLUE(.,name); \
 	.pushsection ".opd","aw"; \
diff --git a/module/lua/setjmp/setjmp_sparc64.S b/module/lua/setjmp/setjmp_sparc64.S
index a37a71cbce..e1099643de 100644
--- a/module/lua/setjmp/setjmp_sparc64.S
+++ b/module/lua/setjmp/setjmp_sparc64.S
@@ -50,7 +50,7 @@
 
 #define	ENTRY(x) \
 	.text ; \
-	.align 32 ; \
+	.balign 32 ; \
 	.globl	x ; \
 	.type	x,@function ; \
 x:
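
Background note: the ambiguity the commit message describes is easiest to see in a
small standalone snippet. The sketch below is illustrative only and not part of the
patch; the symbol names (example_func, example_mask) are invented. Per the GNU as
documentation, .balign always takes a byte count and .p2align always takes a
power-of-two exponent, while .align is read as a byte count on some targets (such
as x86 ELF) but as a shift value on others (such as Arm).

	/* Illustrative sketch only -- not from the patch above. */
	.text
	.balign	16		/* a 16-byte boundary on every target */
	.globl	example_func
example_func:
	ret

	.section .rodata
	.p2align 6		/* 2^6 = a 64-byte boundary on every target */
example_mask:
	.byte	15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0

	/*
	 * ".align 64" would align to 64 bytes on x86 ELF, but on targets
	 * that read the operand as a shift count it would request 2^64
	 * bytes -- exactly the ambiguity this patch removes.
	 */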