author		Thomas Gleixner <tglx@linutronix.de>	2022-09-15 13:10:58 +0200
committer	Peter Zijlstra <peterz@infradead.org>	2022-10-17 16:41:02 +0200
commit		2f93238b87ddbbe1b050ec48ab5843fc61346adb (patch)
tree		f8761d5c121d9259ef824abccafb2599ed7569a8 /arch/x86/crypto/sm4-aesni-avx-asm_64.S
parent		3ba56d0b87113785413dfc5b9910d45001cc4eeb (diff)
crypto: x86/sm[34]: Remove redundant alignments
SYM_FUNC_START*() and friends already imply alignment; remove the custom
alignment hacks to make the code consistent. This prepares for future
function call ABI changes.
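
(For context, a condensed sketch of where that implied alignment comes
from, based on include/linux/linkage.h and arch/x86/include/asm/linkage.h
around this series; exact definitions vary by kernel version and config,
and IBT/ENDBR details are omitted:)

	/* x86: 16-byte alignment, padded with NOPs (0x90) */
	#define __ALIGN		.p2align 4, 0x90
	#define ALIGN		__ALIGN
	#define SYM_A_ALIGN	ALIGN

	/* SYM_ENTRY() emits the linkage directive, the alignment, then the label */
	#define SYM_ENTRY(name, linkage, align...)		\
		linkage(name) ASM_NL				\
		align ASM_NL					\
		name:

	#define SYM_START(name, linkage, align...)		\
		SYM_ENTRY(name, linkage, align)

	/* ...so every SYM_FUNC_START() already aligns the entry point */
	#define SYM_FUNC_START(name)				\
		SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)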
Also, now that the function alignment has been pushed to 16 bytes, this
custom alignment is completely superfluous.

(This code couldn't seem to make up its mind about what alignment it
actually wanted, randomly mixing 8 and 16 bytes.)
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220915111144.868540856@infradead.org
Diffstat (limited to 'arch/x86/crypto/sm4-aesni-avx-asm_64.S')
-rw-r--r--	arch/x86/crypto/sm4-aesni-avx-asm_64.S | 7 -------
1 file changed, 0 insertions(+), 7 deletions(-)
diff --git a/arch/x86/crypto/sm4-aesni-avx-asm_64.S b/arch/x86/crypto/sm4-aesni-avx-asm_64.S
index 4767ab61ff48..e13c8537b2ec 100644
--- a/arch/x86/crypto/sm4-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/sm4-aesni-avx-asm_64.S
@@ -139,13 +139,11 @@
 .text
-.align 16
 
 /*
  * void sm4_aesni_avx_crypt4(const u32 *rk, u8 *dst,
  *                           const u8 *src, int nblocks)
  */
-.align 8
 SYM_FUNC_START(sm4_aesni_avx_crypt4)
 	/* input:
 	 *	%rdi: round key array, CTX
@@ -249,7 +247,6 @@ SYM_FUNC_START(sm4_aesni_avx_crypt4)
 	RET;
 SYM_FUNC_END(sm4_aesni_avx_crypt4)
 
-.align 8
 SYM_FUNC_START_LOCAL(__sm4_crypt_blk8)
 	/* input:
 	 *	%rdi: round key array, CTX
@@ -363,7 +360,6 @@ SYM_FUNC_END(__sm4_crypt_blk8)
  * void sm4_aesni_avx_crypt8(const u32 *rk, u8 *dst,
  *                           const u8 *src, int nblocks)
  */
-.align 8
 SYM_FUNC_START(sm4_aesni_avx_crypt8)
 	/* input:
 	 *	%rdi: round key array, CTX
@@ -419,7 +415,6 @@ SYM_FUNC_END(sm4_aesni_avx_crypt8)
  * void sm4_aesni_avx_ctr_enc_blk8(const u32 *rk, u8 *dst,
  *                                 const u8 *src, u8 *iv)
  */
-.align 8
 SYM_FUNC_START(sm4_aesni_avx_ctr_enc_blk8)
 	/* input:
 	 *	%rdi: round key array, CTX
@@ -494,7 +489,6 @@ SYM_FUNC_END(sm4_aesni_avx_ctr_enc_blk8)
  * void sm4_aesni_avx_cbc_dec_blk8(const u32 *rk, u8 *dst,
  *                                 const u8 *src, u8 *iv)
  */
-.align 8
 SYM_FUNC_START(sm4_aesni_avx_cbc_dec_blk8)
 	/* input:
 	 *	%rdi: round key array, CTX
@@ -544,7 +538,6 @@ SYM_FUNC_END(sm4_aesni_avx_cbc_dec_blk8)
  * void sm4_aesni_avx_cfb_dec_blk8(const u32 *rk, u8 *dst,
  *                                 const u8 *src, u8 *iv)
  */
-.align 8
 SYM_FUNC_START(sm4_aesni_avx_cfb_dec_blk8)
 	/* input:
 	 *	%rdi: round key array, CTX
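
To make the redundancy concrete, here is a hand-expansion of one entry
point before and after the patch (a sketch assuming the x86 __ALIGN of
.p2align 4, 0x90 shown above; not actual assembler output, and IBT/ENDBR
details are again omitted):

	/* Before the patch: the 8-byte request is immediately subsumed */
	.align 8			/* aligns to 8 bytes... */
	.globl sm4_aesni_avx_crypt4	/* from SYM_L_GLOBAL */
	.p2align 4, 0x90		/* ...then the macro aligns to 16 anyway */
sm4_aesni_avx_crypt4:

	/* After the patch: same placement, one directive fewer */
	.globl sm4_aesni_avx_crypt4
	.p2align 4, 0x90		/* 2^4 = 16 bytes, which already implies 8 */
sm4_aesni_avx_crypt4: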