x86 asm: Replace .align with .balign

The .align directive used to align storage locations is
ambiguous: on some platforms and assemblers it takes a byte
count, on others the argument is interpreted as a power-of-two
exponent (a shift value). The current usage expects the former
interpretation.

Replace it with the unambiguous .balign directive, which always
expects a byte count regardless of platform and assembler.
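
For illustration, a minimal sketch of the two interpretations
(per the GNU as documentation; the exact behavior of .align
depends on the configured target):

    /* x86 ELF targets: the operand is a byte count. */
    .align 16       /* pads to a 16-byte boundary */

    /* arm/aarch64 targets: the operand is a power-of-two
       exponent, so the same source aligns differently. */
    .align 4        /* pads to a 2^4 = 16-byte boundary */

    /* Unambiguous forms, identical on every target: */
    .balign 16      /* always a 16-byte boundary */
    .p2align 4      /* always 2^4 = 16 bytes */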

Reviewed-by: Jorgen Lundman <lundman@lundman.net>
Reviewed-by: Tino Reichardt <milky-zfs@mcmilk.de>
Reviewed-by: Richard Yao <richard.yao@alumni.stonybrook.edu>
Signed-off-by: Attila Fülöp <attila@fueloep.org>
Closes #14422
commit 037e4f2536 (parent 58ca7b1011)
Author: Attila Fülöp
Date: 2023-01-23 20:25:21 +01:00
Committed by: Brian Behlendorf
16 changed files with 63 additions and 63 deletions

@@ -127,19 +127,19 @@ extern "C" {
*/
#define ENTRY(x) \
.text; \
-.align ASM_ENTRY_ALIGN; \
+.balign ASM_ENTRY_ALIGN; \
.globl x; \
x: MCOUNT(x)
#define ENTRY_NP(x) \
.text; \
-.align ASM_ENTRY_ALIGN; \
+.balign ASM_ENTRY_ALIGN; \
.globl x; \
x:
#define ENTRY_ALIGN(x, a) \
.text; \
-.align a; \
+.balign a; \
.globl x; \
x:
@@ -148,14 +148,14 @@ x:
*/
#define ENTRY2(x, y) \
.text; \
-.align ASM_ENTRY_ALIGN; \
+.balign ASM_ENTRY_ALIGN; \
.globl x, y; \
x:; \
y: MCOUNT(x)
#define ENTRY_NP2(x, y) \
.text; \
-.align ASM_ENTRY_ALIGN; \
+.balign ASM_ENTRY_ALIGN; \
.globl x, y; \
x:; \
y:

@@ -149,21 +149,21 @@ extern "C" {
#undef ENTRY
#define ENTRY(x) \
.text; \
-.align ASM_ENTRY_ALIGN; \
+.balign ASM_ENTRY_ALIGN; \
.globl x; \
.type x, @function; \
x: MCOUNT(x)
#define ENTRY_NP(x) \
.text; \
-.align ASM_ENTRY_ALIGN; \
+.balign ASM_ENTRY_ALIGN; \
.globl x; \
.type x, @function; \
x:
#define ENTRY_ALIGN(x, a) \
.text; \
-.align a; \
+.balign a; \
.globl x; \
.type x, @function; \
x:
@@ -177,7 +177,7 @@ x:
*/
#define ENTRY2(x, y) \
.text; \
-.align ASM_ENTRY_ALIGN; \
+.balign ASM_ENTRY_ALIGN; \
.globl x, y; \
.type x, @function; \
.type y, @function; \
@@ -186,7 +186,7 @@ y: MCOUNT(x)
#define ENTRY_NP2(x, y) \
.text; \
-.align ASM_ENTRY_ALIGN; \
+.balign ASM_ENTRY_ALIGN; \
.globl x, y; \
.type x, @function; \
.type y, @function; \

@@ -130,19 +130,19 @@ extern "C" {
*/
#define ENTRY(x) \
.text; \
-.align ASM_ENTRY_ALIGN; \
+.balign ASM_ENTRY_ALIGN; \
.globl x; \
x: MCOUNT(x)
#define ENTRY_NP(x) \
.text; \
-.align ASM_ENTRY_ALIGN; \
+.balign ASM_ENTRY_ALIGN; \
.globl x; \
x:
#define ENTRY_ALIGN(x, a) \
.text; \
-.align a; \
+.balign a; \
.globl x; \
x:
@@ -155,14 +155,14 @@ x:
*/
#define ENTRY2(x, y) \
.text; \
-.align ASM_ENTRY_ALIGN; \
+.balign ASM_ENTRY_ALIGN; \
.globl x, y; \
x:; \
y: MCOUNT(x)
#define ENTRY_NP2(x, y) \
.text; \
-.align ASM_ENTRY_ALIGN; \
+.balign ASM_ENTRY_ALIGN; \
.globl x, y; \
x:; \
y:

@@ -149,21 +149,21 @@ extern "C" {
#undef ENTRY
#define ENTRY(x) \
.text; \
-.align ASM_ENTRY_ALIGN; \
+.balign ASM_ENTRY_ALIGN; \
.globl x; \
.type x, @function; \
x: MCOUNT(x)
#define ENTRY_NP(x) \
.text; \
-.align ASM_ENTRY_ALIGN; \
+.balign ASM_ENTRY_ALIGN; \
.globl x; \
.type x, @function; \
x:
#define ENTRY_ALIGN(x, a) \
.text; \
-.align a; \
+.balign a; \
.globl x; \
.type x, @function; \
x:
@@ -177,7 +177,7 @@ x:
*/
#define ENTRY2(x, y) \
.text; \
-.align ASM_ENTRY_ALIGN; \
+.balign ASM_ENTRY_ALIGN; \
.globl x, y; \
.type x, @function; \
.type y, @function; \
@@ -186,7 +186,7 @@ y: MCOUNT(x)
#define ENTRY_NP2(x, y) \
.text; \
-.align ASM_ENTRY_ALIGN; \
+.balign ASM_ENTRY_ALIGN; \
.globl x, y; \
.type x, @function; \
.type y, @function; \

@@ -378,7 +378,7 @@ rijndael_key_setup_enc_intel_local:
FRAME_END
RET
-.align 4
+.balign 4
.Lenc_key192:
cmp $192, %KEYSIZE32
jnz .Lenc_key128
@@ -415,7 +415,7 @@ rijndael_key_setup_enc_intel_local:
FRAME_END
RET
-.align 4
+.balign 4
.Lenc_key128:
cmp $128, %KEYSIZE32
jnz .Lenc_key_invalid_key_bits
@@ -522,7 +522,7 @@ FRAME_BEGIN
add %AESKEY, %ROUNDS64
mov %ROUNDS64, %ENDAESKEY
-.align 4
+.balign 4
.Ldec_key_reorder_loop:
movups (%AESKEY), %xmm0
movups (%ROUNDS64), %xmm1
@@ -533,7 +533,7 @@ FRAME_BEGIN
cmp %AESKEY, %ROUNDS64
ja .Ldec_key_reorder_loop
-.align 4
+.balign 4
.Ldec_key_inv_loop:
movups (%rcx), %xmm0
// Convert an encryption round key to a form usable for decryption
@@ -622,7 +622,7 @@ ENTRY_NP(aes_encrypt_intel)
movups -0x50(%KEYP), %KEY
aesenc %KEY, %STATE
-.align 4
+.balign 4
.Lenc192:
// AES 192 and 256
movups -0x40(%KEYP), %KEY
@@ -630,7 +630,7 @@ ENTRY_NP(aes_encrypt_intel)
movups -0x30(%KEYP), %KEY
aesenc %KEY, %STATE
-.align 4
+.balign 4
.Lenc128:
// AES 128, 192, and 256
movups -0x20(%KEYP), %KEY
@@ -705,7 +705,7 @@ ENTRY_NP(aes_decrypt_intel)
movups -0x50(%KEYP), %KEY
aesdec %KEY, %STATE
-.align 4
+.balign 4
.Ldec192:
// AES 192 and 256
movups -0x40(%KEYP), %KEY
@@ -713,7 +713,7 @@ ENTRY_NP(aes_decrypt_intel)
movups -0x30(%KEYP), %KEY
aesdec %KEY, %STATE
-.align 4
+.balign 4
.Ldec128:
// AES 128, 192, and 256
movups -0x20(%KEYP), %KEY

@@ -694,7 +694,7 @@ aes_decrypt_amd64(const uint32_t rk[], int Nr, const uint32_t ct[4],
* unsigned char *out, const aes_encrypt_ctx cx[1])/
*/
SECTION_STATIC
-.align 64
+.balign 64
enc_tab:
enc_vals(u8)
#ifdef LAST_ROUND_TABLES
@@ -800,7 +800,7 @@ ENTRY_NP(aes_encrypt_amd64)
* unsigned char *out, const aes_encrypt_ctx cx[1])/
*/
SECTION_STATIC
-.align 64
+.balign 64
dec_tab:
dec_vals(v8)
#ifdef LAST_ROUND_TABLES

@@ -58,7 +58,7 @@
.text
#ifdef HAVE_MOVBE
-.align 32
+.balign 32
FUNCTION(_aesni_ctr32_ghash_6x)
.cfi_startproc
ENDBR
@@ -75,7 +75,7 @@ FUNCTION(_aesni_ctr32_ghash_6x)
vmovdqu %xmm4,16+8(%rsp)
jmp .Loop6x
-.align 32
+.balign 32
.Loop6x:
addl $100663296,%ebx
jc .Lhandle_ctr32
@@ -287,7 +287,7 @@ FUNCTION(_aesni_ctr32_ghash_6x)
vmovups 224-128(%rcx),%xmm1
jmp .Lenc_tail
-.align 32
+.balign 32
.Lhandle_ctr32:
vmovdqu (%r11),%xmm0
vpshufb %xmm0,%xmm1,%xmm6
@@ -309,7 +309,7 @@ FUNCTION(_aesni_ctr32_ghash_6x)
vpshufb %xmm0,%xmm1,%xmm1
jmp .Lresume_ctr32
-.align 32
+.balign 32
.Lenc_tail:
vaesenc %xmm15,%xmm9,%xmm9
vmovdqu %xmm7,16+8(%rsp)
@@ -374,7 +374,7 @@ FUNCTION(_aesni_ctr32_ghash_6x)
SET_SIZE(_aesni_ctr32_ghash_6x)
#endif /* ifdef HAVE_MOVBE */
-.align 32
+.balign 32
FUNCTION(_aesni_ctr32_ghash_no_movbe_6x)
.cfi_startproc
ENDBR
@@ -391,7 +391,7 @@ FUNCTION(_aesni_ctr32_ghash_no_movbe_6x)
vmovdqu %xmm4,16+8(%rsp)
jmp .Loop6x_nmb
-.align 32
+.balign 32
.Loop6x_nmb:
addl $100663296,%ebx
jc .Lhandle_ctr32_nmb
@@ -615,7 +615,7 @@ FUNCTION(_aesni_ctr32_ghash_no_movbe_6x)
vmovups 224-128(%rcx),%xmm1
jmp .Lenc_tail_nmb
-.align 32
+.balign 32
.Lhandle_ctr32_nmb:
vmovdqu (%r11),%xmm0
vpshufb %xmm0,%xmm1,%xmm6
@@ -637,7 +637,7 @@ FUNCTION(_aesni_ctr32_ghash_no_movbe_6x)
vpshufb %xmm0,%xmm1,%xmm1
jmp .Lresume_ctr32_nmb
-.align 32
+.balign 32
.Lenc_tail_nmb:
vaesenc %xmm15,%xmm9,%xmm9
vmovdqu %xmm7,16+8(%rsp)
@@ -818,7 +818,7 @@ ENTRY_ALIGN(aesni_gcm_decrypt, 32)
.cfi_endproc
SET_SIZE(aesni_gcm_decrypt)
-.align 32
+.balign 32
FUNCTION(_aesni_ctr32_6x)
.cfi_startproc
ENDBR
@@ -843,7 +843,7 @@ FUNCTION(_aesni_ctr32_6x)
vpxor %xmm4,%xmm14,%xmm14
jmp .Loop_ctr32
-.align 16
+.balign 16
.Loop_ctr32:
vaesenc %xmm15,%xmm9,%xmm9
vaesenc %xmm15,%xmm10,%xmm10
@@ -886,7 +886,7 @@ FUNCTION(_aesni_ctr32_6x)
leaq 96(%rsi),%rsi
RET
-.align 32
+.balign 32
.Lhandle_ctr32_2:
vpshufb %xmm0,%xmm1,%xmm6
vmovdqu 48(%r11),%xmm5
@@ -1237,7 +1237,7 @@ SET_SIZE(atomic_toggle_boolean_nv)
SECTION_STATIC
-.align 64
+.balign 64
.Lbswap_mask:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.Lpoly:
@@ -1249,7 +1249,7 @@ SECTION_STATIC
.Lone_lsb:
.byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
.byte 65,69,83,45,78,73,32,71,67,77,32,109,111,100,117,108,101,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
-.align 64
+.balign 64
/* Mark the stack non-executable. */
#if defined(__linux__) && defined(__ELF__)

@@ -102,7 +102,7 @@ gcm_mul_pclmulqdq(uint64_t *x_in, uint64_t *y, uint64_t *res) {
// static uint8_t byte_swap16_mask[] = {
// 15, 14, 13, 12, 11, 10, 9, 8, 7, 6 ,5, 4, 3, 2, 1, 0 };
.section .rodata
-.align XMM_ALIGN
+.balign XMM_ALIGN
.Lbyte_swap16_mask:
.byte 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0

@@ -188,7 +188,7 @@ ENTRY_ALIGN(gcm_init_htab_avx, 32)
vpxor %xmm2,%xmm6,%xmm6
movq $4,%r10
jmp .Linit_start_avx
-.align 32
+.balign 32
.Linit_loop_avx:
vpalignr $8,%xmm3,%xmm4,%xmm5
vmovdqu %xmm5,-16(%rdi)
@@ -386,7 +386,7 @@ ENTRY_ALIGN(gcm_ghash_avx, 32)
subq $0x80,%rcx
jmp .Loop8x_avx
-.align 32
+.balign 32
.Loop8x_avx:
vpunpckhqdq %xmm15,%xmm15,%xmm8
vmovdqu 112(%rdx),%xmm14
@@ -506,7 +506,7 @@ ENTRY_ALIGN(gcm_ghash_avx, 32)
addq $0x80,%rcx
jmp .Ltail_no_xor_avx
-.align 32
+.balign 32
.Lshort_avx:
vmovdqu -16(%rdx,%rcx,1),%xmm14
leaq (%rdx,%rcx,1),%rdx
@@ -610,7 +610,7 @@ ENTRY_ALIGN(gcm_ghash_avx, 32)
subq $0x10,%rcx
jmp .Ltail_avx
-.align 32
+.balign 32
.Ltail_avx:
vpxor %xmm10,%xmm15,%xmm15
.Ltail_no_xor_avx:
@@ -658,7 +658,7 @@ SET_SIZE(gcm_ghash_avx)
#endif /* !_WIN32 || _KERNEL */
SECTION_STATIC
-.align 64
+.balign 64
.Lbswap_mask:
.byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.L0x1c2_polynomial:
@@ -667,7 +667,7 @@ SECTION_STATIC
.long 7,0,7,0
.L7_mask_poly:
.long 7,0,450,0
-.align 64
+.balign 64
SET_OBJ(.Lrem_4bit)
.Lrem_4bit:
.long 0,0,0,471859200,0,943718400,0,610271232
@@ -710,7 +710,7 @@ SET_OBJ(.Lrem_8bit)
.value 0xBBF0,0xBA32,0xB874,0xB9B6,0xBCF8,0xBD3A,0xBF7C,0xBEBE
.byte 71,72,65,83,72,32,102,111,114,32,120,56,54,95,54,52,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
-.align 64
+.balign 64
/* Mark the stack non-executable. */
#if defined(__linux__) && defined(__ELF__)

@@ -133,7 +133,7 @@ ENTRY_NP(SHA256TransformBlocks)
mov 4*7(%rdi),%r11d
jmp .Lloop
-.align 16
+.balign 16
.Lloop:
xor %rdi,%rdi
mov 4*0(%rsi),%r12d
@@ -873,7 +873,7 @@ ENTRY_NP(SHA256TransformBlocks)
add %r14d,%eax # h+=Maj(a,b,c)
jmp .Lrounds_16_xx
-.align 16
+.balign 16
.Lrounds_16_xx:
mov 4(%rsp),%r13d
mov 56(%rsp),%r12d
@@ -2064,7 +2064,7 @@ ENTRY_NP(SHA256TransformBlocks)
SET_SIZE(SHA256TransformBlocks)
.section .rodata
-.align 64
+.balign 64
SET_OBJ(K256)
K256:
.long 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5

@@ -134,7 +134,7 @@ ENTRY_NP(SHA512TransformBlocks)
mov 8*7(%rdi),%r11
jmp .Lloop
-.align 16
+.balign 16
.Lloop:
xor %rdi,%rdi
mov 8*0(%rsi),%r12
@@ -874,7 +874,7 @@ ENTRY_NP(SHA512TransformBlocks)
add %r14,%rax # h+=Maj(a,b,c)
jmp .Lrounds_16_xx
-.align 16
+.balign 16
.Lrounds_16_xx:
mov 8(%rsp),%r13
mov 112(%rsp),%r12
@@ -2065,7 +2065,7 @@ ENTRY_NP(SHA512TransformBlocks)
SET_SIZE(SHA512TransformBlocks)
.section .rodata
-.align 64
+.balign 64
SET_OBJ(K512)
K512:
.quad 0x428a2f98d728ae22,0x7137449123ef65cd

@@ -35,7 +35,7 @@
#define ENTRY(sym) \
.text; \
.globl sym; \
-.align 2; \
+.balign 2; \
.type sym,#function; \
sym:

@@ -40,7 +40,7 @@
#define ENTRY(x) \
.text; \
.syntax unified; \
-.align 2; \
+.balign 2; \
.global x; \
.type x,#function; \
_FUNC_MODE; \

@@ -25,7 +25,7 @@
#define ENTRY(x) \
.text; \
-.align 8; \
+.balign 8; \
.globl x; \
.type x, @function; \
x:

@@ -54,7 +54,7 @@
#ifdef PPC64_ELF_ABI_v2
#define ENTRY(name) \
-.align 2 ; \
+.balign 2 ; \
.type name,@function; \
.weak name; \
name:
@@ -64,7 +64,7 @@ name:
#define XGLUE(a,b) a##b
#define GLUE(a,b) XGLUE(a,b)
#define ENTRY(name) \
-.align 2 ; \
+.balign 2 ; \
.weak name; \
.weak GLUE(.,name); \
.pushsection ".opd","aw"; \

@@ -50,7 +50,7 @@
#define ENTRY(x) \
.text ; \
-.align 32 ; \
+.balign 32 ; \
.globl x ; \
.type x,@function ; \
x: