author		Linus Torvalds <torvalds@linux-foundation.org>	2017-10-30 09:31:15 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-10-30 09:31:15 -0700
commit		1960e8eabcba2749db9443adb2a5d93e4dabf590 (patch)
tree		17a757b4d80872d99be54dfd2caa0d83d3c00125 /arch/x86
parent		0b07194bb55ed836c2cc7c22e866b87a14681984 (diff)
parent		4635742d1cef5ee5f217f89310a8782ebb4e25dd (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6

Pull crypto fix from Herbert Xu:
 "This fixes an objtool regression"

* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6:
  crypto: x86/chacha20 - satisfy stack validation 2.0
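As background for the diff below, a minimal sketch of the two stack-handling idioms involved (my own illustration; the comments and layout are not from the patch). The old code stashed the incoming %rsp in a scratch register before realigning the stack, a move objtool's stack validation 2.0 cannot follow; the replacement uses the GCC-style realignment idiom, keeping a pointer derived from the caller's stack pointer in %r10, which objtool does track. Offsets follow the AVX2 routine; the SSSE3 one differs only in its alignment mask and instruction order.

	# Old idiom: %rsp parked in a scratch register. objtool
	# loses track of the stack frame at this point.
	mov	%rsp, %r8		# save original %rsp
	and	$~31, %rsp		# align down to 32 bytes
	sub	$0x80, %rsp		# 4 * 32 bytes of scratch
	# ... function body ...
	mov	%r8, %rsp		# restore original %rsp

	# New idiom: %r10 = %rsp + 8, i.e. the caller's stack
	# pointer from before the call pushed the return address.
	lea	8(%rsp), %r10
	and	$~31, %rsp
	sub	$0x80, %rsp
	# ... function body ...
	lea	-8(%r10), %rsp		# undo: (%rsp + 8) - 8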
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/crypto/chacha20-avx2-x86_64.S	4
-rw-r--r--	arch/x86/crypto/chacha20-ssse3-x86_64.S	4
2 files changed, 4 insertions, 4 deletions
diff --git a/arch/x86/crypto/chacha20-avx2-x86_64.S b/arch/x86/crypto/chacha20-avx2-x86_64.S
index 3a2dc3dc6cac..f3cd26f48332 100644
--- a/arch/x86/crypto/chacha20-avx2-x86_64.S
+++ b/arch/x86/crypto/chacha20-avx2-x86_64.S
@@ -45,7 +45,7 @@ ENTRY(chacha20_8block_xor_avx2)
 
 	vzeroupper
 	# 4 * 32 byte stack, 32-byte aligned
-	mov		%rsp, %r8
+	lea		8(%rsp),%r10
 	and		$~31, %rsp
 	sub		$0x80, %rsp
@@ -443,6 +443,6 @@ ENTRY(chacha20_8block_xor_avx2)
 	vmovdqu		%ymm15,0x01e0(%rsi)
 
 	vzeroupper
-	mov		%r8,%rsp
+	lea		-8(%r10),%rsp
 	ret
 ENDPROC(chacha20_8block_xor_avx2)
diff --git a/arch/x86/crypto/chacha20-ssse3-x86_64.S b/arch/x86/crypto/chacha20-ssse3-x86_64.S
index 3f511a7d73b8..512a2b500fd1 100644
--- a/arch/x86/crypto/chacha20-ssse3-x86_64.S
+++ b/arch/x86/crypto/chacha20-ssse3-x86_64.S
@@ -160,7 +160,7 @@ ENTRY(chacha20_4block_xor_ssse3)
 	# done with the slightly better performing SSSE3 byte shuffling,
 	# 7/12-bit word rotation uses traditional shift+OR.
 
-	mov		%rsp,%r11
+	lea		8(%rsp),%r10
 	sub		$0x80,%rsp
 	and		$~63,%rsp
@@ -625,6 +625,6 @@ ENTRY(chacha20_4block_xor_ssse3)
 	pxor		%xmm1,%xmm15
 	movdqu		%xmm15,0xf0(%rsi)
 
-	mov		%r11,%rsp
+	lea		-8(%r10),%rsp
 	ret
 ENDPROC(chacha20_4block_xor_ssse3)
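For what it's worth, a quick check of the restore arithmetic common to both files (annotations mine, assuming %rsp holds the value R at function entry):

	# entry: %rsp = R, where R points at the 8-byte return
	# address the call pushed, so R + 8 is the caller's %rsp
	lea	8(%rsp), %r10		# %r10 = R + 8
	# ... stack realigned and used; %r10 left untouched ...
	lea	-8(%r10), %rsp		# %rsp = (R + 8) - 8 = R
	ret				# pops the return address at R

The lea pair thus round-trips %rsp exactly as the old mov pair did; the routines behave identically, and only the form objtool has to reason about changes.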