Diffstat (limited to 'arch/arm64/kvm/hyp.S')
-rw-r--r-- | arch/arm64/kvm/hyp.S | 600
1 files changed, 494 insertions, 106 deletions
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index b0d1512acf08..b72aa9f9215c 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -16,11 +16,11 @@
  */
 
 #include <linux/linkage.h>
-#include <linux/irqchip/arm-gic.h>
 
 #include <asm/assembler.h>
 #include <asm/memory.h>
 #include <asm/asm-offsets.h>
+#include <asm/debug-monitors.h>
 #include <asm/fpsimdmacros.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
@@ -36,9 +36,6 @@
 	.pushsection	.hyp.text, "ax"
 	.align	PAGE_SHIFT
 
-__kvm_hyp_code_start:
-	.globl __kvm_hyp_code_start
-
 .macro save_common_regs
 	// x2: base address for cpu context
 	// x3: tmp register
@@ -215,6 +212,7 @@ __kvm_hyp_code_start:
 	mrs	x22, amair_el1
 	mrs	x23, cntkctl_el1
 	mrs	x24, par_el1
+	mrs	x25, mdscr_el1
 
 	stp	x4, x5, [x3]
 	stp	x6, x7, [x3, #16]
@@ -226,7 +224,202 @@ __kvm_hyp_code_start:
 	stp	x18, x19, [x3, #112]
 	stp	x20, x21, [x3, #128]
 	stp	x22, x23, [x3, #144]
-	str	x24, [x3, #160]
+	stp	x24, x25, [x3, #160]
+.endm
+
+.macro save_debug
+	// x2: base address for cpu context
+	// x3: tmp register
+
+	mrs	x26, id_aa64dfr0_el1
+	ubfx	x24, x26, #12, #4	// Extract BRPs
+	ubfx	x25, x26, #20, #4	// Extract WRPs
+	mov	w26, #15
+	sub	w24, w26, w24	// How many BPs to skip
+	sub	w25, w26, w25	// How many WPs to skip
+
+	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
+
+	adr	x26, 1f
+	add	x26, x26, x24, lsl #2
+	br	x26
+1:
+	mrs	x20, dbgbcr15_el1
+	mrs	x19, dbgbcr14_el1
+	mrs	x18, dbgbcr13_el1
+	mrs	x17, dbgbcr12_el1
+	mrs	x16, dbgbcr11_el1
+	mrs	x15, dbgbcr10_el1
+	mrs	x14, dbgbcr9_el1
+	mrs	x13, dbgbcr8_el1
+	mrs	x12, dbgbcr7_el1
+	mrs	x11, dbgbcr6_el1
+	mrs	x10, dbgbcr5_el1
+	mrs	x9, dbgbcr4_el1
+	mrs	x8, dbgbcr3_el1
+	mrs	x7, dbgbcr2_el1
+	mrs	x6, dbgbcr1_el1
+	mrs	x5, dbgbcr0_el1
+
+	adr	x26, 1f
+	add	x26, x26, x24, lsl #2
+	br	x26
+
+1:
+	str	x20, [x3, #(15 * 8)]
+	str	x19, [x3, #(14 * 8)]
+	str	x18, [x3, #(13 * 8)]
+	str	x17, [x3, #(12 * 8)]
+	str	x16, [x3, #(11 * 8)]
+	str	x15, [x3, #(10 * 8)]
+	str	x14, [x3, #(9 * 8)]
+	str	x13, [x3, #(8 * 8)]
+	str	x12, [x3, #(7 * 8)]
+	str	x11, [x3, #(6 * 8)]
+	str	x10, [x3, #(5 * 8)]
+	str	x9, [x3, #(4 * 8)]
+	str	x8, [x3, #(3 * 8)]
+	str	x7, [x3, #(2 * 8)]
+	str	x6, [x3, #(1 * 8)]
+	str	x5, [x3, #(0 * 8)]
+
+	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
+
+	adr	x26, 1f
+	add	x26, x26, x24, lsl #2
+	br	x26
+1:
+	mrs	x20, dbgbvr15_el1
+	mrs	x19, dbgbvr14_el1
+	mrs	x18, dbgbvr13_el1
+	mrs	x17, dbgbvr12_el1
+	mrs	x16, dbgbvr11_el1
+	mrs	x15, dbgbvr10_el1
+	mrs	x14, dbgbvr9_el1
+	mrs	x13, dbgbvr8_el1
+	mrs	x12, dbgbvr7_el1
+	mrs	x11, dbgbvr6_el1
+	mrs	x10, dbgbvr5_el1
+	mrs	x9, dbgbvr4_el1
+	mrs	x8, dbgbvr3_el1
+	mrs	x7, dbgbvr2_el1
+	mrs	x6, dbgbvr1_el1
+	mrs	x5, dbgbvr0_el1
+
+	adr	x26, 1f
+	add	x26, x26, x24, lsl #2
+	br	x26
+
+1:
+	str	x20, [x3, #(15 * 8)]
+	str	x19, [x3, #(14 * 8)]
+	str	x18, [x3, #(13 * 8)]
+	str	x17, [x3, #(12 * 8)]
+	str	x16, [x3, #(11 * 8)]
+	str	x15, [x3, #(10 * 8)]
+	str	x14, [x3, #(9 * 8)]
+	str	x13, [x3, #(8 * 8)]
+	str	x12, [x3, #(7 * 8)]
+	str	x11, [x3, #(6 * 8)]
+	str	x10, [x3, #(5 * 8)]
+	str	x9, [x3, #(4 * 8)]
+	str	x8, [x3, #(3 * 8)]
+	str	x7, [x3, #(2 * 8)]
+	str	x6, [x3, #(1 * 8)]
+	str	x5, [x3, #(0 * 8)]
+
+	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
+
+	adr	x26, 1f
+	add	x26, x26, x25, lsl #2
+	br	x26
+1:
+	mrs	x20, dbgwcr15_el1
+	mrs	x19, dbgwcr14_el1
+	mrs	x18, dbgwcr13_el1
+	mrs	x17, dbgwcr12_el1
+	mrs	x16, dbgwcr11_el1
+	mrs	x15, dbgwcr10_el1
+	mrs	x14, dbgwcr9_el1
+	mrs	x13, dbgwcr8_el1
+	mrs	x12, dbgwcr7_el1
+	mrs	x11, dbgwcr6_el1
+	mrs	x10, dbgwcr5_el1
+	mrs	x9, dbgwcr4_el1
+	mrs	x8, dbgwcr3_el1
+	mrs	x7, dbgwcr2_el1
+	mrs	x6, dbgwcr1_el1
+	mrs	x5, dbgwcr0_el1
+
+	adr	x26, 1f
+	add	x26, x26, x25, lsl #2
+	br	x26
+
+1:
+	str	x20, [x3, #(15 * 8)]
+	str	x19, [x3, #(14 * 8)]
+	str	x18, [x3, #(13 * 8)]
+	str	x17, [x3, #(12 * 8)]
+	str	x16, [x3, #(11 * 8)]
+	str	x15, [x3, #(10 * 8)]
+	str	x14, [x3, #(9 * 8)]
+	str	x13, [x3, #(8 * 8)]
+	str	x12, [x3, #(7 * 8)]
+	str	x11, [x3, #(6 * 8)]
+	str	x10, [x3, #(5 * 8)]
+	str	x9, [x3, #(4 * 8)]
+	str	x8, [x3, #(3 * 8)]
+	str	x7, [x3, #(2 * 8)]
+	str	x6, [x3, #(1 * 8)]
+	str	x5, [x3, #(0 * 8)]
+
+	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
+
+	adr	x26, 1f
+	add	x26, x26, x25, lsl #2
+	br	x26
+1:
+	mrs	x20, dbgwvr15_el1
+	mrs	x19, dbgwvr14_el1
+	mrs	x18, dbgwvr13_el1
+	mrs	x17, dbgwvr12_el1
+	mrs	x16, dbgwvr11_el1
+	mrs	x15, dbgwvr10_el1
+	mrs	x14, dbgwvr9_el1
+	mrs	x13, dbgwvr8_el1
+	mrs	x12, dbgwvr7_el1
+	mrs	x11, dbgwvr6_el1
+	mrs	x10, dbgwvr5_el1
+	mrs	x9, dbgwvr4_el1
+	mrs	x8, dbgwvr3_el1
+	mrs	x7, dbgwvr2_el1
+	mrs	x6, dbgwvr1_el1
+	mrs	x5, dbgwvr0_el1
+
+	adr	x26, 1f
+	add	x26, x26, x25, lsl #2
+	br	x26
+
+1:
+	str	x20, [x3, #(15 * 8)]
+	str	x19, [x3, #(14 * 8)]
+	str	x18, [x3, #(13 * 8)]
+	str	x17, [x3, #(12 * 8)]
+	str	x16, [x3, #(11 * 8)]
+	str	x15, [x3, #(10 * 8)]
+	str	x14, [x3, #(9 * 8)]
+	str	x13, [x3, #(8 * 8)]
+	str	x12, [x3, #(7 * 8)]
+	str	x11, [x3, #(6 * 8)]
+	str	x10, [x3, #(5 * 8)]
+	str	x9, [x3, #(4 * 8)]
+	str	x8, [x3, #(3 * 8)]
+	str	x7, [x3, #(2 * 8)]
+	str	x6, [x3, #(1 * 8)]
+	str	x5, [x3, #(0 * 8)]
+
+	mrs	x21, mdccint_el1
+	str	x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
 .endm
 
 .macro restore_sysregs
@@ -245,7 +438,7 @@ __kvm_hyp_code_start:
 	ldp	x18, x19, [x3, #112]
 	ldp	x20, x21, [x3, #128]
 	ldp	x22, x23, [x3, #144]
-	ldr	x24, [x3, #160]
+	ldp	x24, x25, [x3, #160]
 
 	msr	vmpidr_el2, x4
 	msr	csselr_el1, x5
@@ -268,6 +461,198 @@ __kvm_hyp_code_start:
 	msr	amair_el1, x22
 	msr	cntkctl_el1, x23
 	msr	par_el1, x24
+	msr	mdscr_el1, x25
+.endm
+
+.macro restore_debug
+	// x2: base address for cpu context
+	// x3: tmp register
+
+	mrs	x26, id_aa64dfr0_el1
+	ubfx	x24, x26, #12, #4	// Extract BRPs
+	ubfx	x25, x26, #20, #4	// Extract WRPs
+	mov	w26, #15
+	sub	w24, w26, w24	// How many BPs to skip
+	sub	w25, w26, w25	// How many WPs to skip
+
+	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
+
+	adr	x26, 1f
+	add	x26, x26, x24, lsl #2
+	br	x26
+1:
+	ldr	x20, [x3, #(15 * 8)]
+	ldr	x19, [x3, #(14 * 8)]
+	ldr	x18, [x3, #(13 * 8)]
+	ldr	x17, [x3, #(12 * 8)]
+	ldr	x16, [x3, #(11 * 8)]
+	ldr	x15, [x3, #(10 * 8)]
+	ldr	x14, [x3, #(9 * 8)]
+	ldr	x13, [x3, #(8 * 8)]
+	ldr	x12, [x3, #(7 * 8)]
+	ldr	x11, [x3, #(6 * 8)]
+	ldr	x10, [x3, #(5 * 8)]
+	ldr	x9, [x3, #(4 * 8)]
+	ldr	x8, [x3, #(3 * 8)]
+	ldr	x7, [x3, #(2 * 8)]
+	ldr	x6, [x3, #(1 * 8)]
+	ldr	x5, [x3, #(0 * 8)]
+
+	adr	x26, 1f
+	add	x26, x26, x24, lsl #2
+	br	x26
+1:
+	msr	dbgbcr15_el1, x20
+	msr	dbgbcr14_el1, x19
+	msr	dbgbcr13_el1, x18
+	msr	dbgbcr12_el1, x17
+	msr	dbgbcr11_el1, x16
+	msr	dbgbcr10_el1, x15
+	msr	dbgbcr9_el1, x14
+	msr	dbgbcr8_el1, x13
+	msr	dbgbcr7_el1, x12
+	msr	dbgbcr6_el1, x11
+	msr	dbgbcr5_el1, x10
+	msr	dbgbcr4_el1, x9
+	msr	dbgbcr3_el1, x8
+	msr	dbgbcr2_el1, x7
+	msr	dbgbcr1_el1, x6
+	msr	dbgbcr0_el1, x5
+
+	add	x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
+
+	adr	x26, 1f
+	add	x26, x26, x24, lsl #2
+	br	x26
+1:
+	ldr	x20, [x3, #(15 * 8)]
+	ldr	x19, [x3, #(14 * 8)]
+	ldr	x18, [x3, #(13 * 8)]
+	ldr	x17, [x3, #(12 * 8)]
+	ldr	x16, [x3, #(11 * 8)]
+	ldr	x15, [x3, #(10 * 8)]
+	ldr	x14, [x3, #(9 * 8)]
+	ldr	x13, [x3, #(8 * 8)]
+	ldr	x12, [x3, #(7 * 8)]
+	ldr	x11, [x3, #(6 * 8)]
+	ldr	x10, [x3, #(5 * 8)]
+	ldr	x9, [x3, #(4 * 8)]
+	ldr	x8, [x3, #(3 * 8)]
+	ldr	x7, [x3, #(2 * 8)]
+	ldr	x6, [x3, #(1 * 8)]
+	ldr	x5, [x3, #(0 * 8)]
+
+	adr	x26, 1f
+	add	x26, x26, x24, lsl #2
+	br	x26
+1:
+	msr	dbgbvr15_el1, x20
+	msr	dbgbvr14_el1, x19
+	msr	dbgbvr13_el1, x18
+	msr	dbgbvr12_el1, x17
+	msr	dbgbvr11_el1, x16
+	msr	dbgbvr10_el1, x15
+	msr	dbgbvr9_el1, x14
+	msr	dbgbvr8_el1, x13
+	msr	dbgbvr7_el1, x12
+	msr	dbgbvr6_el1, x11
+	msr	dbgbvr5_el1, x10
+	msr	dbgbvr4_el1, x9
+	msr	dbgbvr3_el1, x8
+	msr	dbgbvr2_el1, x7
+	msr	dbgbvr1_el1, x6
+	msr	dbgbvr0_el1, x5
+
+	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
+
+	adr	x26, 1f
+	add	x26, x26, x25, lsl #2
+	br	x26
+1:
+	ldr	x20, [x3, #(15 * 8)]
+	ldr	x19, [x3, #(14 * 8)]
+	ldr	x18, [x3, #(13 * 8)]
+	ldr	x17, [x3, #(12 * 8)]
+	ldr	x16, [x3, #(11 * 8)]
+	ldr	x15, [x3, #(10 * 8)]
+	ldr	x14, [x3, #(9 * 8)]
+	ldr	x13, [x3, #(8 * 8)]
+	ldr	x12, [x3, #(7 * 8)]
+	ldr	x11, [x3, #(6 * 8)]
+	ldr	x10, [x3, #(5 * 8)]
+	ldr	x9, [x3, #(4 * 8)]
+	ldr	x8, [x3, #(3 * 8)]
+	ldr	x7, [x3, #(2 * 8)]
+	ldr	x6, [x3, #(1 * 8)]
+	ldr	x5, [x3, #(0 * 8)]
+
+	adr	x26, 1f
+	add	x26, x26, x25, lsl #2
+	br	x26
+1:
+	msr	dbgwcr15_el1, x20
+	msr	dbgwcr14_el1, x19
+	msr	dbgwcr13_el1, x18
+	msr	dbgwcr12_el1, x17
+	msr	dbgwcr11_el1, x16
+	msr	dbgwcr10_el1, x15
+	msr	dbgwcr9_el1, x14
+	msr	dbgwcr8_el1, x13
+	msr	dbgwcr7_el1, x12
+	msr	dbgwcr6_el1, x11
+	msr	dbgwcr5_el1, x10
+	msr	dbgwcr4_el1, x9
+	msr	dbgwcr3_el1, x8
+	msr	dbgwcr2_el1, x7
+	msr	dbgwcr1_el1, x6
+	msr	dbgwcr0_el1, x5
+
+	add	x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
+
+	adr	x26, 1f
+	add	x26, x26, x25, lsl #2
+	br	x26
+1:
+	ldr	x20, [x3, #(15 * 8)]
+	ldr	x19, [x3, #(14 * 8)]
+	ldr	x18, [x3, #(13 * 8)]
+	ldr	x17, [x3, #(12 * 8)]
+	ldr	x16, [x3, #(11 * 8)]
+	ldr	x15, [x3, #(10 * 8)]
+	ldr	x14, [x3, #(9 * 8)]
+	ldr	x13, [x3, #(8 * 8)]
+	ldr	x12, [x3, #(7 * 8)]
+	ldr	x11, [x3, #(6 * 8)]
+	ldr	x10, [x3, #(5 * 8)]
+	ldr	x9, [x3, #(4 * 8)]
+	ldr	x8, [x3, #(3 * 8)]
+	ldr	x7, [x3, #(2 * 8)]
+	ldr	x6, [x3, #(1 * 8)]
+	ldr	x5, [x3, #(0 * 8)]
+
+	adr	x26, 1f
+	add	x26, x26, x25, lsl #2
+	br	x26
+1:
+	msr	dbgwvr15_el1, x20
+	msr	dbgwvr14_el1, x19
+	msr	dbgwvr13_el1, x18
+	msr	dbgwvr12_el1, x17
+	msr	dbgwvr11_el1, x16
+	msr	dbgwvr10_el1, x15
+	msr	dbgwvr9_el1, x14
+	msr	dbgwvr8_el1, x13
+	msr	dbgwvr7_el1, x12
+	msr	dbgwvr6_el1, x11
+	msr	dbgwvr5_el1, x10
+	msr	dbgwvr4_el1, x9
+	msr	dbgwvr3_el1, x8
+	msr	dbgwvr2_el1, x7
+	msr	dbgwvr1_el1, x6
+	msr	dbgwvr0_el1, x5
+
+	ldr	x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
+	msr	mdccint_el1, x21
 .endm
 
 .macro skip_32bit_state tmp, target
@@ -282,6 +667,35 @@ __kvm_hyp_code_start:
 	tbz	\tmp, #12, \target
 .endm
 
+.macro skip_debug_state tmp, target
+	ldr	\tmp, [x0, #VCPU_DEBUG_FLAGS]
+	tbz	\tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
+.endm
+
+.macro compute_debug_state target
+	// Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
+	// is set, we do a full save/restore cycle and disable trapping.
+	add	x25, x0, #VCPU_CONTEXT
+
+	// Check the state of MDSCR_EL1
+	ldr	x25, [x25, #CPU_SYSREG_OFFSET(MDSCR_EL1)]
+	and	x26, x25, #DBG_MDSCR_KDE
+	and	x25, x25, #DBG_MDSCR_MDE
+	adds	xzr, x25, x26
+	b.eq	9998f		// Nothing to see there
+
+	// If any interesting bits was set, we must set the flag
+	mov	x26, #KVM_ARM64_DEBUG_DIRTY
+	str	x26, [x0, #VCPU_DEBUG_FLAGS]
+	b	9999f		// Don't skip restore
+
+9998:
+	// Otherwise load the flags from memory in case we recently
+	// trapped
+	skip_debug_state x25, \target
+9999:
+.endm
+
 .macro save_guest_32bit_state
 	skip_32bit_state x3, 1f
 
@@ -297,10 +711,13 @@ __kvm_hyp_code_start:
 	mrs	x4, dacr32_el2
 	mrs	x5, ifsr32_el2
 	mrs	x6, fpexc32_el2
-	mrs	x7, dbgvcr32_el2
 
 	stp	x4, x5, [x3]
-	stp	x6, x7, [x3, #16]
+	str	x6, [x3, #16]
 
+	skip_debug_state x8, 2f
+	mrs	x7, dbgvcr32_el2
+	str	x7, [x3, #24]
+2:
 	skip_tee_state x8, 1f
 
 	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
@@ -323,12 +740,15 @@ __kvm_hyp_code_start:
 
 	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
 	ldp	x4, x5, [x3]
-	ldp	x6, x7, [x3, #16]
+	ldr	x6, [x3, #16]
 	msr	dacr32_el2, x4
 	msr	ifsr32_el2, x5
 	msr	fpexc32_el2, x6
-	msr	dbgvcr32_el2, x7
 
+	skip_debug_state x8, 2f
+	ldr	x7, [x3, #24]
+	msr	dbgvcr32_el2, x7
+2:
 	skip_tee_state x8, 1f
 
 	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
@@ -339,11 +759,8 @@ __kvm_hyp_code_start:
 .endm
 
 .macro activate_traps
-	ldr	x2, [x0, #VCPU_IRQ_LINES]
-	ldr	x1, [x0, #VCPU_HCR_EL2]
-	orr	x2, x2, x1
-	msr	hcr_el2, x2
-
+	ldr	x2, [x0, #VCPU_HCR_EL2]
+	msr	hcr_el2, x2
 	ldr	x2, =(CPTR_EL2_TTA)
 	msr	cptr_el2, x2
 
@@ -353,6 +770,14 @@ __kvm_hyp_code_start:
 	mrs	x2, mdcr_el2
 	and	x2, x2, #MDCR_EL2_HPMN_MASK
 	orr	x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
+	orr	x2, x2, #(MDCR_EL2_TDRA | MDCR_EL2_TDOSA)
+
+	// Check for KVM_ARM64_DEBUG_DIRTY, and set debug to trap
+	// if not dirty.
+	ldr	x3, [x0, #VCPU_DEBUG_FLAGS]
+	tbnz	x3, #KVM_ARM64_DEBUG_DIRTY_SHIFT, 1f
+	orr	x2, x2, #MDCR_EL2_TDA
+1:
 	msr	mdcr_el2, x2
 .endm
 
@@ -379,100 +804,33 @@ __kvm_hyp_code_start:
 .endm
 
 /*
- * Save the VGIC CPU state into memory
- * x0: Register pointing to VCPU struct
- * Do not corrupt x1!!!
+ * Call into the vgic backend for state saving
  */
 .macro save_vgic_state
-	/* Get VGIC VCTRL base into x2 */
-	ldr	x2, [x0, #VCPU_KVM]
-	kern_hyp_va	x2
-	ldr	x2, [x2, #KVM_VGIC_VCTRL]
-	kern_hyp_va	x2
-	cbz	x2, 2f		// disabled
-
-	/* Compute the address of struct vgic_cpu */
-	add	x3, x0, #VCPU_VGIC_CPU
-
-	/* Save all interesting registers */
-	ldr	w4, [x2, #GICH_HCR]
-	ldr	w5, [x2, #GICH_VMCR]
-	ldr	w6, [x2, #GICH_MISR]
-	ldr	w7, [x2, #GICH_EISR0]
-	ldr	w8, [x2, #GICH_EISR1]
-	ldr	w9, [x2, #GICH_ELRSR0]
-	ldr	w10, [x2, #GICH_ELRSR1]
-	ldr	w11, [x2, #GICH_APR]
-CPU_BE(	rev	w4, w4 )
-CPU_BE(	rev	w5, w5 )
-CPU_BE(	rev	w6, w6 )
-CPU_BE(	rev	w7, w7 )
-CPU_BE(	rev	w8, w8 )
-CPU_BE(	rev	w9, w9 )
-CPU_BE(	rev	w10, w10 )
-CPU_BE(	rev	w11, w11 )
-
-	str	w4, [x3, #VGIC_CPU_HCR]
-	str	w5, [x3, #VGIC_CPU_VMCR]
-	str	w6, [x3, #VGIC_CPU_MISR]
-	str	w7, [x3, #VGIC_CPU_EISR]
-	str	w8, [x3, #(VGIC_CPU_EISR + 4)]
-	str	w9, [x3, #VGIC_CPU_ELRSR]
-	str	w10, [x3, #(VGIC_CPU_ELRSR + 4)]
-	str	w11, [x3, #VGIC_CPU_APR]
-
-	/* Clear GICH_HCR */
-	str	wzr, [x2, #GICH_HCR]
-
-	/* Save list registers */
-	add	x2, x2, #GICH_LR0
-	ldr	w4, [x3, #VGIC_CPU_NR_LR]
-	add	x3, x3, #VGIC_CPU_LR
-1:	ldr	w5, [x2], #4
-CPU_BE(	rev	w5, w5 )
-	str	w5, [x3], #4
-	sub	w4, w4, #1
-	cbnz	w4, 1b
-2:
+	adr	x24, __vgic_sr_vectors
+	ldr	x24, [x24, VGIC_SAVE_FN]
+	kern_hyp_va	x24
+	blr	x24
+	mrs	x24, hcr_el2
+	mov	x25, #HCR_INT_OVERRIDE
+	neg	x25, x25
+	and	x24, x24, x25
+	msr	hcr_el2, x24
 .endm
 
 /*
- * Restore the VGIC CPU state from memory
- * x0: Register pointing to VCPU struct
+ * Call into the vgic backend for state restoring
  */
 .macro restore_vgic_state
-	/* Get VGIC VCTRL base into x2 */
-	ldr	x2, [x0, #VCPU_KVM]
-	kern_hyp_va	x2
-	ldr	x2, [x2, #KVM_VGIC_VCTRL]
-	kern_hyp_va	x2
-	cbz	x2, 2f		// disabled
-
-	/* Compute the address of struct vgic_cpu */
-	add	x3, x0, #VCPU_VGIC_CPU
-
-	/* We only restore a minimal set of registers */
-	ldr	w4, [x3, #VGIC_CPU_HCR]
-	ldr	w5, [x3, #VGIC_CPU_VMCR]
-	ldr	w6, [x3, #VGIC_CPU_APR]
-CPU_BE(	rev	w4, w4 )
-CPU_BE(	rev	w5, w5 )
-CPU_BE(	rev	w6, w6 )
-
-	str	w4, [x2, #GICH_HCR]
-	str	w5, [x2, #GICH_VMCR]
-	str	w6, [x2, #GICH_APR]
-
-	/* Restore list registers */
-	add	x2, x2, #GICH_LR0
-	ldr	w4, [x3, #VGIC_CPU_NR_LR]
-	add	x3, x3, #VGIC_CPU_LR
-1:	ldr	w5, [x3], #4
-CPU_BE(	rev	w5, w5 )
-	str	w5, [x2], #4
-	sub	w4, w4, #1
-	cbnz	w4, 1b
-2:
+	mrs	x24, hcr_el2
+	ldr	x25, [x0, #VCPU_IRQ_LINES]
+	orr	x24, x24, #HCR_INT_OVERRIDE
+	orr	x24, x24, x25
+	msr	hcr_el2, x24
+	adr	x24, __vgic_sr_vectors
+	ldr	x24, [x24, #VGIC_RESTORE_FN]
+	kern_hyp_va	x24
+	blr	x24
 .endm
 
 .macro save_timer_state
@@ -537,6 +895,14 @@ __restore_sysregs:
 	restore_sysregs
 	ret
 
+__save_debug:
+	save_debug
+	ret
+
+__restore_debug:
+	restore_debug
+	ret
+
 __save_fpsimd:
 	save_fpsimd
 	ret
@@ -568,6 +934,9 @@ ENTRY(__kvm_vcpu_run)
 	bl __save_fpsimd
 	bl __save_sysregs
 
+	compute_debug_state 1f
+	bl	__save_debug
+1:
 	activate_traps
 	activate_vm
 
@@ -579,6 +948,10 @@ ENTRY(__kvm_vcpu_run)
 
 	bl __restore_sysregs
 	bl __restore_fpsimd
+
+	skip_debug_state x3, 1f
+	bl	__restore_debug
+1:
 	restore_guest_32bit_state
 	restore_guest_regs
 
@@ -595,6 +968,10 @@ __kvm_vcpu_return:
 	save_guest_regs
 	bl __save_fpsimd
 	bl __save_sysregs
+
+	skip_debug_state x3, 1f
+	bl	__save_debug
+1:
 	save_guest_32bit_state
 
 	save_timer_state
@@ -609,6 +986,14 @@ __kvm_vcpu_return:
 
 	bl __restore_sysregs
 	bl __restore_fpsimd
+
+	skip_debug_state x3, 1f
+	// Clear the dirty flag for the next run, as all the state has
+	// already been saved. Note that we nuke the whole 64bit word.
+	// If we ever add more flags, we'll have to be more careful...
+	str	xzr, [x0, #VCPU_DEBUG_FLAGS]
+	bl	__restore_debug
+1:
 	restore_host_regs
 
 	mov	x0, x1
@@ -653,6 +1038,12 @@ ENTRY(__kvm_flush_vm_context)
 	ret
ENDPROC(__kvm_flush_vm_context)
 
+	// struct vgic_sr_vectors __vgi_sr_vectors;
+	.align 3
+ENTRY(__vgic_sr_vectors)
+	.skip	VGIC_SR_VECTOR_SZ
+ENDPROC(__vgic_sr_vectors)
+
 __kvm_hyp_panic:
 	// Guess the context by looking at VTTBR:
 	// If zero, then we're already a host.
@@ -830,7 +1221,7 @@ el1_trap:
 	mrs	x2, far_el2
 
 2:	mrs	x0, tpidr_el2
-	str	x1, [x0, #VCPU_ESR_EL2]
+	str	w1, [x0, #VCPU_ESR_EL2]
 	str	x2, [x0, #VCPU_FAR_EL2]
 	str	x3, [x0, #VCPU_HPFAR_EL2]
 
@@ -880,7 +1271,4 @@ ENTRY(__kvm_hyp_vector)
 	ventry	el1_error_invalid	// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)
 
-__kvm_hyp_code_end:
-	.globl	__kvm_hyp_code_end
-
 	.popsection
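The core of the patch is the lazy debug-register switch: compute_debug_state pays for the full save/restore cycle only when the guest is actively using the hardware debug registers (MDSCR_EL1.KDE or MDSCR_EL1.MDE set), or when a previously trapped debug access has already marked the state dirty; otherwise MDCR_EL2_TDA is left set so any guest debug access traps to the hypervisor, which then sets the dirty flag. A minimal C sketch of that policy follows; the struct layout and helper name are invented for illustration (the real logic is the compute_debug_state/skip_debug_state assembly macros above, with the flag kept in the vcpu's VCPU_DEBUG_FLAGS word).

#include <stdbool.h>
#include <stdint.h>

/* Architectural MDSCR_EL1 bits (mirroring asm/debug-monitors.h). */
#define DBG_MDSCR_KDE		(1UL << 13)	/* kernel debug enable */
#define DBG_MDSCR_MDE		(1UL << 15)	/* monitor debug enable */
#define KVM_ARM64_DEBUG_DIRTY	(1UL << 0)

/* Invented stand-in for the vcpu fields the assembly touches. */
struct vcpu_sketch {
	uint64_t mdscr_el1;	/* guest copy held in the sysreg context */
	uint64_t debug_flags;	/* the VCPU_DEBUG_FLAGS word */
};

/* compute_debug_state, rendered in C: decide whether this world
 * switch must save/restore the full debug register state. */
static bool need_debug_switch(struct vcpu_sketch *v)
{
	if (v->mdscr_el1 & (DBG_MDSCR_KDE | DBG_MDSCR_MDE)) {
		/* Debug is live: do the full cycle and skip the
		 * MDCR_EL2_TDA trap for this run. */
		v->debug_flags = KVM_ARM64_DEBUG_DIRTY;
		return true;
	}
	/* Otherwise switch only if a trapped debug access already
	 * dirtied the registers; __kvm_vcpu_return clears the whole
	 * flags word once the state has been saved. */
	return v->debug_flags & KVM_ARM64_DEBUG_DIRTY;
}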