author    Tom Lendacky <thomas.lendacky@amd.com>  2018-01-10 13:26:26 -0600
committer Ingo Molnar <mingo@kernel.org>          2018-01-16 01:50:58 +0100
commit    cc5f01e28d6c60f274fd1e33b245f679f79f543c (patch)
tree      8335713024cc0e1d16c8e07c06ba8cc45ea3ae29 /arch/x86/mm/mem_encrypt_boot.S
parent    2b5d00b6c2cdd94f6d6a494a6f6c0c0fc7b8e711 (diff)
x86/mm: Prepare sme_encrypt_kernel() for PAGE aligned encryption
In preparation for encrypting more than just the kernel, the encryption
support in sme_encrypt_kernel() needs to support 4KB page aligned
encryption instead of just 2MB large page aligned encryption. Update the
routines that populate the PGD to support non-2MB aligned addresses. This
is done by creating PTE page tables for the start and end portion of the
address range that fall outside of the 2MB alignment. This results in, at
most, two extra pages to hold the PTE entries for each mapping of a range.

Tested-by: Gabriel Craciunescu <nix.or.die@gmail.com>
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20180110192626.6026.75387.stgit@tlendack-t1.amdoffice.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
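The core of the change described above is splitting a 4KB-aligned address
range at its 2MB boundaries: the unaligned head and tail get 4KB PTE
mappings (at most one extra PTE page each, since one PTE page covers 2MB),
while the aligned middle keeps 2MB PMD mappings. A minimal standalone
sketch of that split follows; this is not the kernel's actual
PGD-population code, and split_range() plus the example range are
illustrative only:

/*
 * Illustrative sketch of the head/middle/tail split described in the
 * commit message. Not kernel code; helper names are hypothetical.
 */
#include <stdio.h>

#define PMD_PAGE_SIZE 0x200000ULL /* 2MB large-page granularity */

#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

static void split_range(unsigned long long start, unsigned long long end)
{
	unsigned long long pmd_start = ALIGN_UP(start, PMD_PAGE_SIZE);
	unsigned long long pmd_end   = ALIGN_DOWN(end, PMD_PAGE_SIZE);

	if (pmd_start >= pmd_end) {
		/* Range never spans a full 2MB block: PTEs only. */
		printf("PTE  %#llx - %#llx\n", start, end);
		return;
	}

	/* Unaligned head: mapped with 4KB PTEs (one extra PTE page). */
	if (start < pmd_start)
		printf("PTE  %#llx - %#llx\n", start, pmd_start);

	/* 2MB-aligned middle: mapped with large PMD entries. */
	printf("PMD  %#llx - %#llx\n", pmd_start, pmd_end);

	/* Unaligned tail: mapped with 4KB PTEs (one extra PTE page). */
	if (end > pmd_end)
		printf("PTE  %#llx - %#llx\n", pmd_end, end);
}

int main(void)
{
	/* A 4KB-aligned but not 2MB-aligned example range. */
	split_range(0x1003000ULL, 0x1805000ULL);
	return 0;
}

On the example range this prints one PTE-mapped head piece, the PMD-mapped
middle, and one PTE-mapped tail piece, matching the "at most two extra
pages" bound stated above.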
Diffstat (limited to 'arch/x86/mm/mem_encrypt_boot.S')
 arch/x86/mm/mem_encrypt_boot.S | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S
index de3688461145..23a8a9e411ea 100644
--- a/arch/x86/mm/mem_encrypt_boot.S
+++ b/arch/x86/mm/mem_encrypt_boot.S
@@ -104,6 +104,7 @@ ENTRY(__enc_copy)
mov %rdx, %cr4
push %r15
+ push %r12
movq %rcx, %r9 /* Save kernel length */
movq %rdi, %r10 /* Save encrypted kernel address */
@@ -119,21 +120,27 @@ ENTRY(__enc_copy)
wbinvd /* Invalidate any cache entries */
- /* Copy/encrypt 2MB at a time */
+ /* Copy/encrypt up to 2MB at a time */
+ movq $PMD_PAGE_SIZE, %r12
1:
+ cmpq %r12, %r9
+ jnb 2f
+ movq %r9, %r12
+
+2:
movq %r11, %rsi /* Source - decrypted kernel */
movq %r8, %rdi /* Dest - intermediate copy buffer */
- movq $PMD_PAGE_SIZE, %rcx /* 2MB length */
+ movq %r12, %rcx
rep movsb
movq %r8, %rsi /* Source - intermediate copy buffer */
movq %r10, %rdi /* Dest - encrypted kernel */
- movq $PMD_PAGE_SIZE, %rcx /* 2MB length */
+ movq %r12, %rcx
rep movsb
- addq $PMD_PAGE_SIZE, %r11
- addq $PMD_PAGE_SIZE, %r10
- subq $PMD_PAGE_SIZE, %r9 /* Kernel length decrement */
+ addq %r12, %r11
+ addq %r12, %r10
+ subq %r12, %r9 /* Kernel length decrement */
jnz 1b /* Kernel length not zero? */
/* Restore PAT register */
@@ -142,6 +149,7 @@ ENTRY(__enc_copy)
mov %r15, %rdx /* Restore original PAT value */
wrmsr
+ pop %r12
pop %r15
ret
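For reference, the reworked __enc_copy loop reads in C roughly as below:
each pass clamps the copy length to min(remaining, PMD_PAGE_SIZE), which
is what the new %r12 bookkeeping implements. This is a sketch only; the
function signature and names are illustrative, and the real routine runs
out of an intermediate area with the encryption mask toggled in the page
tables so that the second copy performs the actual encryption:

/*
 * C rendering of the copy loop above. memcpy() stands in for the
 * two "rep movsb" passes; names are hypothetical, not the kernel's.
 */
#include <stddef.h>
#include <string.h>

#define PMD_PAGE_SIZE 0x200000UL /* 2MB */

static void enc_copy(unsigned char *encrypted, unsigned char *decrypted,
		     unsigned char *workarea, size_t len)
{
	while (len) {
		/* %r12 in the assembly: up to 2MB, or whatever is left. */
		size_t chunk = len < PMD_PAGE_SIZE ? len : PMD_PAGE_SIZE;

		/* Source (decrypted kernel) -> intermediate copy buffer. */
		memcpy(workarea, decrypted, chunk);
		/* Intermediate copy buffer -> destination (encrypted). */
		memcpy(encrypted, workarea, chunk);

		decrypted += chunk;
		encrypted += chunk;
		len -= chunk; /* Kernel length decrement */
	}
}

Because the chunk size is clamped rather than fixed, a PAGE-aligned length
that is not a multiple of 2MB now terminates cleanly instead of relying on
the length being exactly 2MB-divisible.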