author      Sean Christopherson <seanjc@google.com>    2024-02-23 12:42:28 -0800
committer   Sean Christopherson <seanjc@google.com>    2024-04-09 10:20:29 -0700
commit      331282fdb15edaf1beb1d27a64d3f65a34d7394d (patch)
tree        ba9c5130065442b2b0e8b8989ebf22488e748a2c /arch
parent      7774c8f32e99b1f314c0df7c204a897792b4f378 (diff)
KVM: SVM: Drop 32-bit "support" from __svm_sev_es_vcpu_run()
Drop 32-bit "support" from __svm_sev_es_vcpu_run(), as SEV/SEV-ES is
firmly 64-bit only. The "support" was purely the result of bad
copy+paste from __svm_vcpu_run(), which in turn was slightly less bad
copy+paste from __vmx_vcpu_run().

Opportunistically convert to unadulterated register accesses so that
it's easier (but still not easy) to follow which registers hold what
arguments, and when.

Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Link: https://lore.kernel.org/r/20240223204233.3337324-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
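For readers not steeped in the kernel's asm macros: the portability the
old code paid for came from the _ASM_* aliases in
arch/x86/include/asm/asm.h. A simplified sketch of the argument-register
mapping (the real header builds these through __ASM_REG()/__ASM_FORM()
indirection, so treat this as illustrative, not verbatim):

    #ifdef CONFIG_X86_64
    /* 64-bit: System V ABI, integer args in registers. */
    #define _ASM_ARG1  _ASM_DI   /* %rdi: @svm */
    #define _ASM_ARG2  _ASM_SI   /* %rsi: @spec_ctrl_intercepted */
    #define _ASM_ARG3  _ASM_DX
    #else
    /* 32-bit: the kernel builds with -mregparm=3. */
    #define _ASM_ARG1  _ASM_AX
    #define _ASM_ARG2  _ASM_DX
    #define _ASM_ARG3  _ASM_CX
    #endif

With SEV-ES being 64-bit only, push %_ASM_ARG2 could only ever assemble
to push %rsi, which is what the patch now spells out directly.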
Diffstat (limited to 'arch')
-rw-r--r--    arch/x86/kvm/svm/vmenter.S    44
1 file changed, 13 insertions(+), 31 deletions(-)
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 6f57d496867a..c057866a459b 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -298,17 +298,12 @@ SYM_FUNC_END(__svm_vcpu_run)
* @spec_ctrl_intercepted: bool
*/
SYM_FUNC_START(__svm_sev_es_vcpu_run)
- push %_ASM_BP
-#ifdef CONFIG_X86_64
+ push %rbp
push %r15
push %r14
push %r13
push %r12
-#else
- push %edi
- push %esi
-#endif
- push %_ASM_BX
+ push %rbx
/*
* Save variables needed after vmexit on the stack, in inverse
@@ -316,39 +311,31 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
*/
/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
- push %_ASM_ARG2
+ push %rsi
/* Save @svm. */
- push %_ASM_ARG1
-
-.ifnc _ASM_ARG1, _ASM_DI
- /*
- * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
- * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
- */
- mov %_ASM_ARG1, %_ASM_DI
-.endif
+ push %rdi
/* Clobbers RAX, RCX, RDX. */
RESTORE_GUEST_SPEC_CTRL
/* Get svm->current_vmcb->pa into RAX. */
- mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
- mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX
+ mov SVM_current_vmcb(%rdi), %rax
+ mov KVM_VMCB_pa(%rax), %rax
/* Enter guest mode */
sti
-1: vmrun %_ASM_AX
+1: vmrun %rax
2: cli
/* Pop @svm to RDI, guest registers have been saved already. */
- pop %_ASM_DI
+ pop %rdi
#ifdef CONFIG_MITIGATION_RETPOLINE
/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
- FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
+ FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif
/* Clobbers RAX, RCX, RDX. */
@@ -364,26 +351,21 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
UNTRAIN_RET_VM
/* "Pop" @spec_ctrl_intercepted. */
- pop %_ASM_BX
+ pop %rbx
- pop %_ASM_BX
+ pop %rbx
-#ifdef CONFIG_X86_64
pop %r12
pop %r13
pop %r14
pop %r15
-#else
- pop %esi
- pop %edi
-#endif
- pop %_ASM_BP
+ pop %rbp
RET
RESTORE_GUEST_SPEC_CTRL_BODY
RESTORE_HOST_SPEC_CTRL_BODY
-3: cmpb $0, _ASM_RIP(kvm_rebooting)
+3: cmpb $0, kvm_rebooting(%rip)
jne 2b
ud2
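A note on the 1:/2:/3: labels, since the hunk above ends at the ud2:
this is KVM's usual fault-tolerant VMRUN pattern. Just past this
excerpt, vmenter.S wires the vmrun at 1: into the exception table,
roughly:

    /* Paraphrased from the surrounding file, not part of this patch:
     * a fault on the vmrun at 1: is redirected to 3:.  If KVM is
     * tearing down (kvm_rebooting != 0), the fault is forgiven and
     * execution resumes at 2:; otherwise ud2 triggers a panic.
     */
    _ASM_EXTABLE(1b, 3b)

The patch's change at 3: is purely the addressing mode:
_ASM_RIP(kvm_rebooting) is the macro spelling of kvm_rebooting(%rip)
on 64-bit builds, so with 32-bit gone it can be written out plainly.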