Diffstat (limited to 'arch/powerpc/kvm')

 arch/powerpc/kvm/book3s_hv_rmhandlers.S | 220
 arch/powerpc/kvm/book3s_interrupts.S    |  72
 arch/powerpc/kvm/booke_interrupts.S     | 272
 arch/powerpc/kvm/bookehv_interrupts.S   | 222
 4 files changed, 393 insertions(+), 393 deletions(-)
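Every hunk below makes the same mechanical substitution: the argument passed to an offset macro such as VCPU_GPR() or HOST_NV_GPR() changes from a lowercase GPR name (r14) to an uppercase one (R14). These macros do preprocessor arithmetic on their argument, so the argument must expand to a plain number. A minimal sketch of why that matters, assuming numeric defines in the style of the powerpc headers (the define values, base offset, and sizes below are illustrative stand-ins, not quoted from this commit):

	#include <stdio.h>

	/* Hypothetical stand-ins for the uppercase numeric register
	 * defines the new code relies on (assumption: the real ones live
	 * in the arch/powerpc headers and expand to the bare number). */
	#define R14 14
	#define R31 31

	#define ULONG_SIZE 8              /* assuming a 64-bit build */
	#define VCPU_GPRS  0x100          /* illustrative base offset */
	#define VCPU_GPR(n) (VCPU_GPRS + ((n) * ULONG_SIZE))

	int main(void)
	{
		/* VCPU_GPR(R14) folds to a constant the assembler can use
		 * as a load/store displacement: 0x100 + 14 * 8 = 0x170.
		 * A lowercase name that expands to a register token such
		 * as %r14 would break this arithmetic, which is presumably
		 * why the diff moves to the uppercase numeric names. */
		printf("VCPU_GPR(R14) = %#x\n", VCPU_GPR(R14));
		printf("VCPU_GPR(R31) = %#x\n", VCPU_GPR(R31));
		return 0;
	}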
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index a1044f43becd..bc99015030c3 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -206,24 +206,24 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	/* Load up FP, VMX and VSX registers */
 	bl	kvmppc_load_fp
 
-	ld	r14, VCPU_GPR(r14)(r4)
-	ld	r15, VCPU_GPR(r15)(r4)
-	ld	r16, VCPU_GPR(r16)(r4)
-	ld	r17, VCPU_GPR(r17)(r4)
-	ld	r18, VCPU_GPR(r18)(r4)
-	ld	r19, VCPU_GPR(r19)(r4)
-	ld	r20, VCPU_GPR(r20)(r4)
-	ld	r21, VCPU_GPR(r21)(r4)
-	ld	r22, VCPU_GPR(r22)(r4)
-	ld	r23, VCPU_GPR(r23)(r4)
-	ld	r24, VCPU_GPR(r24)(r4)
-	ld	r25, VCPU_GPR(r25)(r4)
-	ld	r26, VCPU_GPR(r26)(r4)
-	ld	r27, VCPU_GPR(r27)(r4)
-	ld	r28, VCPU_GPR(r28)(r4)
-	ld	r29, VCPU_GPR(r29)(r4)
-	ld	r30, VCPU_GPR(r30)(r4)
-	ld	r31, VCPU_GPR(r31)(r4)
+	ld	r14, VCPU_GPR(R14)(r4)
+	ld	r15, VCPU_GPR(R15)(r4)
+	ld	r16, VCPU_GPR(R16)(r4)
+	ld	r17, VCPU_GPR(R17)(r4)
+	ld	r18, VCPU_GPR(R18)(r4)
+	ld	r19, VCPU_GPR(R19)(r4)
+	ld	r20, VCPU_GPR(R20)(r4)
+	ld	r21, VCPU_GPR(R21)(r4)
+	ld	r22, VCPU_GPR(R22)(r4)
+	ld	r23, VCPU_GPR(R23)(r4)
+	ld	r24, VCPU_GPR(R24)(r4)
+	ld	r25, VCPU_GPR(R25)(r4)
+	ld	r26, VCPU_GPR(R26)(r4)
+	ld	r27, VCPU_GPR(R27)(r4)
+	ld	r28, VCPU_GPR(R28)(r4)
+	ld	r29, VCPU_GPR(R29)(r4)
+	ld	r30, VCPU_GPR(R30)(r4)
+	ld	r31, VCPU_GPR(R31)(r4)
 
 BEGIN_FTR_SECTION
 	/* Switch DSCR to guest value */
@@ -547,21 +547,21 @@ fast_guest_return:
 	mtlr	r5
 	mtcr	r6
 
-	ld	r0, VCPU_GPR(r0)(r4)
-	ld	r1, VCPU_GPR(r1)(r4)
-	ld	r2, VCPU_GPR(r2)(r4)
-	ld	r3, VCPU_GPR(r3)(r4)
-	ld	r5, VCPU_GPR(r5)(r4)
-	ld	r6, VCPU_GPR(r6)(r4)
-	ld	r7, VCPU_GPR(r7)(r4)
-	ld	r8, VCPU_GPR(r8)(r4)
-	ld	r9, VCPU_GPR(r9)(r4)
-	ld	r10, VCPU_GPR(r10)(r4)
-	ld	r11, VCPU_GPR(r11)(r4)
-	ld	r12, VCPU_GPR(r12)(r4)
-	ld	r13, VCPU_GPR(r13)(r4)
-
-	ld	r4, VCPU_GPR(r4)(r4)
+	ld	r0, VCPU_GPR(R0)(r4)
+	ld	r1, VCPU_GPR(R1)(r4)
+	ld	r2, VCPU_GPR(R2)(r4)
+	ld	r3, VCPU_GPR(R3)(r4)
+	ld	r5, VCPU_GPR(R5)(r4)
+	ld	r6, VCPU_GPR(R6)(r4)
+	ld	r7, VCPU_GPR(R7)(r4)
+	ld	r8, VCPU_GPR(R8)(r4)
+	ld	r9, VCPU_GPR(R9)(r4)
+	ld	r10, VCPU_GPR(R10)(r4)
+	ld	r11, VCPU_GPR(R11)(r4)
+	ld	r12, VCPU_GPR(R12)(r4)
+	ld	r13, VCPU_GPR(R13)(r4)
+
+	ld	r4, VCPU_GPR(R4)(r4)
 
 	hrfid
 	b	.
@@ -590,22 +590,22 @@ kvmppc_interrupt:
 
 	/* Save registers */
 
-	std	r0, VCPU_GPR(r0)(r9)
-	std	r1, VCPU_GPR(r1)(r9)
-	std	r2, VCPU_GPR(r2)(r9)
-	std	r3, VCPU_GPR(r3)(r9)
-	std	r4, VCPU_GPR(r4)(r9)
-	std	r5, VCPU_GPR(r5)(r9)
-	std	r6, VCPU_GPR(r6)(r9)
-	std	r7, VCPU_GPR(r7)(r9)
-	std	r8, VCPU_GPR(r8)(r9)
+	std	r0, VCPU_GPR(R0)(r9)
+	std	r1, VCPU_GPR(R1)(r9)
+	std	r2, VCPU_GPR(R2)(r9)
+	std	r3, VCPU_GPR(R3)(r9)
+	std	r4, VCPU_GPR(R4)(r9)
+	std	r5, VCPU_GPR(R5)(r9)
+	std	r6, VCPU_GPR(R6)(r9)
+	std	r7, VCPU_GPR(R7)(r9)
+	std	r8, VCPU_GPR(R8)(r9)
 	ld	r0, HSTATE_HOST_R2(r13)
-	std	r0, VCPU_GPR(r9)(r9)
-	std	r10, VCPU_GPR(r10)(r9)
-	std	r11, VCPU_GPR(r11)(r9)
+	std	r0, VCPU_GPR(R9)(r9)
+	std	r10, VCPU_GPR(R10)(r9)
+	std	r11, VCPU_GPR(R11)(r9)
 	ld	r3, HSTATE_SCRATCH0(r13)
 	lwz	r4, HSTATE_SCRATCH1(r13)
-	std	r3, VCPU_GPR(r12)(r9)
+	std	r3, VCPU_GPR(R12)(r9)
 	stw	r4, VCPU_CR(r9)
 
 	/* Restore R1/R2 so we can handle faults */
@@ -626,7 +626,7 @@ kvmppc_interrupt:
 	GET_SCRATCH0(r3)
 	mflr	r4
-	std	r3, VCPU_GPR(r13)(r9)
+	std	r3, VCPU_GPR(R13)(r9)
 	std	r4, VCPU_LR(r9)
 
 	/* Unset guest mode */
@@ -968,24 +968,24 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 	/* Save non-volatile GPRs */
-	std	r14, VCPU_GPR(r14)(r9)
-	std	r15, VCPU_GPR(r15)(r9)
-	std	r16, VCPU_GPR(r16)(r9)
-	std	r17, VCPU_GPR(r17)(r9)
-	std	r18, VCPU_GPR(r18)(r9)
-	std	r19, VCPU_GPR(r19)(r9)
-	std	r20, VCPU_GPR(r20)(r9)
-	std	r21, VCPU_GPR(r21)(r9)
-	std	r22, VCPU_GPR(r22)(r9)
-	std	r23, VCPU_GPR(r23)(r9)
-	std	r24, VCPU_GPR(r24)(r9)
-	std	r25, VCPU_GPR(r25)(r9)
-	std	r26, VCPU_GPR(r26)(r9)
-	std	r27, VCPU_GPR(r27)(r9)
-	std	r28, VCPU_GPR(r28)(r9)
-	std	r29, VCPU_GPR(r29)(r9)
-	std	r30, VCPU_GPR(r30)(r9)
-	std	r31, VCPU_GPR(r31)(r9)
+	std	r14, VCPU_GPR(R14)(r9)
+	std	r15, VCPU_GPR(R15)(r9)
+	std	r16, VCPU_GPR(R16)(r9)
+	std	r17, VCPU_GPR(R17)(r9)
+	std	r18, VCPU_GPR(R18)(r9)
+	std	r19, VCPU_GPR(R19)(r9)
+	std	r20, VCPU_GPR(R20)(r9)
+	std	r21, VCPU_GPR(R21)(r9)
+	std	r22, VCPU_GPR(R22)(r9)
+	std	r23, VCPU_GPR(R23)(r9)
+	std	r24, VCPU_GPR(R24)(r9)
+	std	r25, VCPU_GPR(R25)(r9)
+	std	r26, VCPU_GPR(R26)(r9)
+	std	r27, VCPU_GPR(R27)(r9)
+	std	r28, VCPU_GPR(R28)(r9)
+	std	r29, VCPU_GPR(R29)(r9)
+	std	r30, VCPU_GPR(R30)(r9)
+	std	r31, VCPU_GPR(R31)(r9)
 
 	/* Save SPRGs */
 	mfspr	r3, SPRN_SPRG0
@@ -1160,7 +1160,7 @@ kvmppc_hdsi:
 	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
 	beq	3f
 	clrrdi	r0, r4, 28
-	PPC_SLBFEE_DOT(r5, r0)		/* if so, look up SLB */
+	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
 	bne	1f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
 	stw	r6, VCPU_FAULT_DSISR(r9)
@@ -1234,7 +1234,7 @@ kvmppc_hisi:
 	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
 	beq	3f
 	clrrdi	r0, r10, 28
-	PPC_SLBFEE_DOT(r5, r0)		/* if so, look up SLB */
+	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
 	bne	1f			/* if no SLB entry found */
4:
 	/* Search the hash table. */
@@ -1278,7 +1278,7 @@ kvmppc_hisi:
  */
 	.globl	hcall_try_real_mode
hcall_try_real_mode:
-	ld	r3,VCPU_GPR(r3)(r9)
+	ld	r3,VCPU_GPR(R3)(r9)
 	andi.	r0,r11,MSR_PR
 	bne	hcall_real_cont
 	clrrdi	r3,r3,2
@@ -1291,12 +1291,12 @@ hcall_try_real_mode:
 	add	r3,r3,r4
 	mtctr	r3
 	mr	r3,r9		/* get vcpu pointer */
-	ld	r4,VCPU_GPR(r4)(r9)
+	ld	r4,VCPU_GPR(R4)(r9)
 	bctrl
 	cmpdi	r3,H_TOO_HARD
 	beq	hcall_real_fallback
 	ld	r4,HSTATE_KVM_VCPU(r13)
-	std	r3,VCPU_GPR(r3)(r4)
+	std	r3,VCPU_GPR(R3)(r4)
 	ld	r10,VCPU_PC(r4)
 	ld	r11,VCPU_MSR(r4)
 	b	fast_guest_return
@@ -1424,7 +1424,7 @@ _GLOBAL(kvmppc_h_cede)
 	li	r0,0		/* set trap to 0 to say hcall is handled */
 	stw	r0,VCPU_TRAP(r3)
 	li	r0,H_SUCCESS
-	std	r0,VCPU_GPR(r3)(r3)
+	std	r0,VCPU_GPR(R3)(r3)
BEGIN_FTR_SECTION
 	b	2f		/* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
@@ -1443,7 +1443,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
 	or	r4,r4,r0
-	PPC_POPCNTW(r7,r4)
+	PPC_POPCNTW(R7,R4)
 	cmpw	r7,r8
 	bge	2f
 	stwcx.	r4,0,r6
@@ -1464,24 +1464,24 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 	 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 	 */
 	/* Save non-volatile GPRs */
-	std	r14, VCPU_GPR(r14)(r3)
-	std	r15, VCPU_GPR(r15)(r3)
-	std	r16, VCPU_GPR(r16)(r3)
-	std	r17, VCPU_GPR(r17)(r3)
-	std	r18, VCPU_GPR(r18)(r3)
-	std	r19, VCPU_GPR(r19)(r3)
-	std	r20, VCPU_GPR(r20)(r3)
-	std	r21, VCPU_GPR(r21)(r3)
-	std	r22, VCPU_GPR(r22)(r3)
-	std	r23, VCPU_GPR(r23)(r3)
-	std	r24, VCPU_GPR(r24)(r3)
-	std	r25, VCPU_GPR(r25)(r3)
-	std	r26, VCPU_GPR(r26)(r3)
-	std	r27, VCPU_GPR(r27)(r3)
-	std	r28, VCPU_GPR(r28)(r3)
-	std	r29, VCPU_GPR(r29)(r3)
-	std	r30, VCPU_GPR(r30)(r3)
-	std	r31, VCPU_GPR(r31)(r3)
+	std	r14, VCPU_GPR(R14)(r3)
+	std	r15, VCPU_GPR(R15)(r3)
+	std	r16, VCPU_GPR(R16)(r3)
+	std	r17, VCPU_GPR(R17)(r3)
+	std	r18, VCPU_GPR(R18)(r3)
+	std	r19, VCPU_GPR(R19)(r3)
+	std	r20, VCPU_GPR(R20)(r3)
+	std	r21, VCPU_GPR(R21)(r3)
+	std	r22, VCPU_GPR(R22)(r3)
+	std	r23, VCPU_GPR(R23)(r3)
+	std	r24, VCPU_GPR(R24)(r3)
+	std	r25, VCPU_GPR(R25)(r3)
+	std	r26, VCPU_GPR(R26)(r3)
+	std	r27, VCPU_GPR(R27)(r3)
+	std	r28, VCPU_GPR(R28)(r3)
+	std	r29, VCPU_GPR(R29)(r3)
+	std	r30, VCPU_GPR(R30)(r3)
+	std	r31, VCPU_GPR(R31)(r3)
 
 	/* save FP state */
 	bl	.kvmppc_save_fp
@@ -1513,24 +1513,24 @@ kvm_end_cede:
 	bl	kvmppc_load_fp
 
 	/* Load NV GPRS */
-	ld	r14, VCPU_GPR(r14)(r4)
-	ld	r15, VCPU_GPR(r15)(r4)
-	ld	r16, VCPU_GPR(r16)(r4)
-	ld	r17, VCPU_GPR(r17)(r4)
-	ld	r18, VCPU_GPR(r18)(r4)
-	ld	r19, VCPU_GPR(r19)(r4)
-	ld	r20, VCPU_GPR(r20)(r4)
-	ld	r21, VCPU_GPR(r21)(r4)
-	ld	r22, VCPU_GPR(r22)(r4)
-	ld	r23, VCPU_GPR(r23)(r4)
-	ld	r24, VCPU_GPR(r24)(r4)
-	ld	r25, VCPU_GPR(r25)(r4)
-	ld	r26, VCPU_GPR(r26)(r4)
-	ld	r27, VCPU_GPR(r27)(r4)
-	ld	r28, VCPU_GPR(r28)(r4)
-	ld	r29, VCPU_GPR(r29)(r4)
-	ld	r30, VCPU_GPR(r30)(r4)
-	ld	r31, VCPU_GPR(r31)(r4)
+	ld	r14, VCPU_GPR(R14)(r4)
+	ld	r15, VCPU_GPR(R15)(r4)
+	ld	r16, VCPU_GPR(R16)(r4)
+	ld	r17, VCPU_GPR(R17)(r4)
+	ld	r18, VCPU_GPR(R18)(r4)
+	ld	r19, VCPU_GPR(R19)(r4)
+	ld	r20, VCPU_GPR(R20)(r4)
+	ld	r21, VCPU_GPR(R21)(r4)
+	ld	r22, VCPU_GPR(R22)(r4)
+	ld	r23, VCPU_GPR(R23)(r4)
+	ld	r24, VCPU_GPR(R24)(r4)
+	ld	r25, VCPU_GPR(R25)(r4)
+	ld	r26, VCPU_GPR(R26)(r4)
+	ld	r27, VCPU_GPR(R27)(r4)
+	ld	r28, VCPU_GPR(R28)(r4)
+	ld	r29, VCPU_GPR(R29)(r4)
+	ld	r30, VCPU_GPR(R30)(r4)
+	ld	r31, VCPU_GPR(R31)(r4)
 
 	/* clear our bit in vcore->napping_threads */
33:	ld	r5,HSTATE_KVM_VCORE(r13)
@@ -1649,7 +1649,7 @@ BEGIN_FTR_SECTION
 	reg = 0
 	.rept	32
 	li	r6,reg*16+VCPU_VSRS
-	STXVD2X(reg,r6,r3)
+	STXVD2X(reg,R6,R3)
 	reg = reg + 1
 	.endr
FTR_SECTION_ELSE
@@ -1711,7 +1711,7 @@ BEGIN_FTR_SECTION
 	reg = 0
 	.rept	32
 	li	r7,reg*16+VCPU_VSRS
-	LXVD2X(reg,r7,r4)
+	LXVD2X(reg,R7,R4)
 	reg = reg + 1
 	.endr
FTR_SECTION_ELSE
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 3e35383bdb21..2ddab0f90a81 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -39,24 +39,24 @@
#define VCPU_GPR(n)	(VCPU_GPRS + (n * ULONG_SIZE))
 
#define VCPU_LOAD_NVGPRS(vcpu) \
-	PPC_LL	r14, VCPU_GPR(r14)(vcpu); \
-	PPC_LL	r15, VCPU_GPR(r15)(vcpu); \
-	PPC_LL	r16, VCPU_GPR(r16)(vcpu); \
-	PPC_LL	r17, VCPU_GPR(r17)(vcpu); \
-	PPC_LL	r18, VCPU_GPR(r18)(vcpu); \
-	PPC_LL	r19, VCPU_GPR(r19)(vcpu); \
-	PPC_LL	r20, VCPU_GPR(r20)(vcpu); \
-	PPC_LL	r21, VCPU_GPR(r21)(vcpu); \
-	PPC_LL	r22, VCPU_GPR(r22)(vcpu); \
-	PPC_LL	r23, VCPU_GPR(r23)(vcpu); \
-	PPC_LL	r24, VCPU_GPR(r24)(vcpu); \
-	PPC_LL	r25, VCPU_GPR(r25)(vcpu); \
-	PPC_LL	r26, VCPU_GPR(r26)(vcpu); \
-	PPC_LL	r27, VCPU_GPR(r27)(vcpu); \
-	PPC_LL	r28, VCPU_GPR(r28)(vcpu); \
-	PPC_LL	r29, VCPU_GPR(r29)(vcpu); \
-	PPC_LL	r30, VCPU_GPR(r30)(vcpu); \
-	PPC_LL	r31, VCPU_GPR(r31)(vcpu); \
+	PPC_LL	r14, VCPU_GPR(R14)(vcpu); \
+	PPC_LL	r15, VCPU_GPR(R15)(vcpu); \
+	PPC_LL	r16, VCPU_GPR(R16)(vcpu); \
+	PPC_LL	r17, VCPU_GPR(R17)(vcpu); \
+	PPC_LL	r18, VCPU_GPR(R18)(vcpu); \
+	PPC_LL	r19, VCPU_GPR(R19)(vcpu); \
+	PPC_LL	r20, VCPU_GPR(R20)(vcpu); \
+	PPC_LL	r21, VCPU_GPR(R21)(vcpu); \
+	PPC_LL	r22, VCPU_GPR(R22)(vcpu); \
+	PPC_LL	r23, VCPU_GPR(R23)(vcpu); \
+	PPC_LL	r24, VCPU_GPR(R24)(vcpu); \
+	PPC_LL	r25, VCPU_GPR(R25)(vcpu); \
+	PPC_LL	r26, VCPU_GPR(R26)(vcpu); \
+	PPC_LL	r27, VCPU_GPR(R27)(vcpu); \
+	PPC_LL	r28, VCPU_GPR(R28)(vcpu); \
+	PPC_LL	r29, VCPU_GPR(R29)(vcpu); \
+	PPC_LL	r30, VCPU_GPR(R30)(vcpu); \
+	PPC_LL	r31, VCPU_GPR(R31)(vcpu); \
 
/*****************************************************************************
 *                                                                           *
@@ -131,24 +131,24 @@ kvmppc_handler_highmem:
 	/* R7 = vcpu */
 	PPC_LL	r7, GPR4(r1)
 
-	PPC_STL	r14, VCPU_GPR(r14)(r7)
-	PPC_STL	r15, VCPU_GPR(r15)(r7)
-	PPC_STL	r16, VCPU_GPR(r16)(r7)
-	PPC_STL	r17, VCPU_GPR(r17)(r7)
-	PPC_STL	r18, VCPU_GPR(r18)(r7)
-	PPC_STL	r19, VCPU_GPR(r19)(r7)
-	PPC_STL	r20, VCPU_GPR(r20)(r7)
-	PPC_STL	r21, VCPU_GPR(r21)(r7)
-	PPC_STL	r22, VCPU_GPR(r22)(r7)
-	PPC_STL	r23, VCPU_GPR(r23)(r7)
-	PPC_STL	r24, VCPU_GPR(r24)(r7)
-	PPC_STL	r25, VCPU_GPR(r25)(r7)
-	PPC_STL	r26, VCPU_GPR(r26)(r7)
-	PPC_STL	r27, VCPU_GPR(r27)(r7)
-	PPC_STL	r28, VCPU_GPR(r28)(r7)
-	PPC_STL	r29, VCPU_GPR(r29)(r7)
-	PPC_STL	r30, VCPU_GPR(r30)(r7)
-	PPC_STL	r31, VCPU_GPR(r31)(r7)
+	PPC_STL	r14, VCPU_GPR(R14)(r7)
+	PPC_STL	r15, VCPU_GPR(R15)(r7)
+	PPC_STL	r16, VCPU_GPR(R16)(r7)
+	PPC_STL	r17, VCPU_GPR(R17)(r7)
+	PPC_STL	r18, VCPU_GPR(R18)(r7)
+	PPC_STL	r19, VCPU_GPR(R19)(r7)
+	PPC_STL	r20, VCPU_GPR(R20)(r7)
+	PPC_STL	r21, VCPU_GPR(R21)(r7)
+	PPC_STL	r22, VCPU_GPR(R22)(r7)
+	PPC_STL	r23, VCPU_GPR(R23)(r7)
+	PPC_STL	r24, VCPU_GPR(R24)(r7)
+	PPC_STL	r25, VCPU_GPR(R25)(r7)
+	PPC_STL	r26, VCPU_GPR(R26)(r7)
+	PPC_STL	r27, VCPU_GPR(R27)(r7)
+	PPC_STL	r28, VCPU_GPR(R28)(r7)
+	PPC_STL	r29, VCPU_GPR(R29)(r7)
+	PPC_STL	r30, VCPU_GPR(R30)(r7)
+	PPC_STL	r31, VCPU_GPR(R31)(r7)
 
 	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
 	mr	r5, r12
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 8feec2ff3928..e598a5a0d5e4 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -37,7 +37,7 @@
#define HOST_CR 16
#define HOST_NV_GPRS 20
#define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4))
-#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
+#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + 4)
#define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
#define HOST_STACK_LR (HOST_STACK_SIZE + 4) /* In caller stack frame. */
@@ -58,8 +58,8 @@ _GLOBAL(kvmppc_handler_\ivor_nr)
 	/* Get pointer to vcpu and record exit number. */
 	mtspr	SPRN_SPRG_WSCRATCH0, r4
 	mfspr	r4, SPRN_SPRG_RVCPU
-	stw	r5, VCPU_GPR(r5)(r4)
-	stw	r6, VCPU_GPR(r6)(r4)
+	stw	r5, VCPU_GPR(R5)(r4)
+	stw	r6, VCPU_GPR(R6)(r4)
 	mfctr	r5
 	lis	r6, kvmppc_resume_host@h
 	stw	r5, VCPU_CTR(r4)
@@ -100,12 +100,12 @@ _GLOBAL(kvmppc_handler_len)
 * r5: KVM exit number
 */
_GLOBAL(kvmppc_resume_host)
-	stw	r3, VCPU_GPR(r3)(r4)
+	stw	r3, VCPU_GPR(R3)(r4)
 	mfcr	r3
 	stw	r3, VCPU_CR(r4)
-	stw	r7, VCPU_GPR(r7)(r4)
-	stw	r8, VCPU_GPR(r8)(r4)
-	stw	r9, VCPU_GPR(r9)(r4)
+	stw	r7, VCPU_GPR(R7)(r4)
+	stw	r8, VCPU_GPR(R8)(r4)
+	stw	r9, VCPU_GPR(R9)(r4)
 
 	li	r6, 1
 	slw	r6, r6, r5
@@ -135,23 +135,23 @@ _GLOBAL(kvmppc_resume_host)
 	isync
 	stw	r9, VCPU_LAST_INST(r4)
 
-	stw	r15, VCPU_GPR(r15)(r4)
-	stw	r16, VCPU_GPR(r16)(r4)
-	stw	r17, VCPU_GPR(r17)(r4)
-	stw	r18, VCPU_GPR(r18)(r4)
-	stw	r19, VCPU_GPR(r19)(r4)
-	stw	r20, VCPU_GPR(r20)(r4)
-	stw	r21, VCPU_GPR(r21)(r4)
-	stw	r22, VCPU_GPR(r22)(r4)
-	stw	r23, VCPU_GPR(r23)(r4)
-	stw	r24, VCPU_GPR(r24)(r4)
-	stw	r25, VCPU_GPR(r25)(r4)
-	stw	r26, VCPU_GPR(r26)(r4)
-	stw	r27, VCPU_GPR(r27)(r4)
-	stw	r28, VCPU_GPR(r28)(r4)
-	stw	r29, VCPU_GPR(r29)(r4)
-	stw	r30, VCPU_GPR(r30)(r4)
-	stw	r31, VCPU_GPR(r31)(r4)
+	stw	r15, VCPU_GPR(R15)(r4)
+	stw	r16, VCPU_GPR(R16)(r4)
+	stw	r17, VCPU_GPR(R17)(r4)
+	stw	r18, VCPU_GPR(R18)(r4)
+	stw	r19, VCPU_GPR(R19)(r4)
+	stw	r20, VCPU_GPR(R20)(r4)
+	stw	r21, VCPU_GPR(R21)(r4)
+	stw	r22, VCPU_GPR(R22)(r4)
+	stw	r23, VCPU_GPR(R23)(r4)
+	stw	r24, VCPU_GPR(R24)(r4)
+	stw	r25, VCPU_GPR(R25)(r4)
+	stw	r26, VCPU_GPR(R26)(r4)
+	stw	r27, VCPU_GPR(R27)(r4)
+	stw	r28, VCPU_GPR(R28)(r4)
+	stw	r29, VCPU_GPR(R29)(r4)
+	stw	r30, VCPU_GPR(R30)(r4)
+	stw	r31, VCPU_GPR(R31)(r4)
..skip_inst_copy:
 
 	/* Also grab DEAR and ESR before the host can clobber them. */
@@ -169,20 +169,20 @@ _GLOBAL(kvmppc_resume_host)
..skip_esr:
 
 	/* Save remaining volatile guest register state to vcpu. */
-	stw	r0, VCPU_GPR(r0)(r4)
-	stw	r1, VCPU_GPR(r1)(r4)
-	stw	r2, VCPU_GPR(r2)(r4)
-	stw	r10, VCPU_GPR(r10)(r4)
-	stw	r11, VCPU_GPR(r11)(r4)
-	stw	r12, VCPU_GPR(r12)(r4)
-	stw	r13, VCPU_GPR(r13)(r4)
-	stw	r14, VCPU_GPR(r14)(r4) /* We need a NV GPR below. */
+	stw	r0, VCPU_GPR(R0)(r4)
+	stw	r1, VCPU_GPR(R1)(r4)
+	stw	r2, VCPU_GPR(R2)(r4)
+	stw	r10, VCPU_GPR(R10)(r4)
+	stw	r11, VCPU_GPR(R11)(r4)
+	stw	r12, VCPU_GPR(R12)(r4)
+	stw	r13, VCPU_GPR(R13)(r4)
+	stw	r14, VCPU_GPR(R14)(r4) /* We need a NV GPR below. */
 	mflr	r3
 	stw	r3, VCPU_LR(r4)
 	mfxer	r3
 	stw	r3, VCPU_XER(r4)
 	mfspr	r3, SPRN_SPRG_RSCRATCH0
-	stw	r3, VCPU_GPR(r4)(r4)
+	stw	r3, VCPU_GPR(R4)(r4)
 	mfspr	r3, SPRN_SRR0
 	stw	r3, VCPU_PC(r4)
@@ -214,28 +214,28 @@ _GLOBAL(kvmppc_resume_host)
 
 	/* Restore vcpu pointer and the nonvolatiles we used. */
 	mr	r4, r14
-	lwz	r14, VCPU_GPR(r14)(r4)
+	lwz	r14, VCPU_GPR(R14)(r4)
 
 	/* Sometimes instruction emulation must restore complete GPR state. */
 	andi.	r5, r3, RESUME_FLAG_NV
 	beq	..skip_nv_load
-	lwz	r15, VCPU_GPR(r15)(r4)
-	lwz	r16, VCPU_GPR(r16)(r4)
-	lwz	r17, VCPU_GPR(r17)(r4)
-	lwz	r18, VCPU_GPR(r18)(r4)
-	lwz	r19, VCPU_GPR(r19)(r4)
-	lwz	r20, VCPU_GPR(r20)(r4)
-	lwz	r21, VCPU_GPR(r21)(r4)
-	lwz	r22, VCPU_GPR(r22)(r4)
-	lwz	r23, VCPU_GPR(r23)(r4)
-	lwz	r24, VCPU_GPR(r24)(r4)
-	lwz	r25, VCPU_GPR(r25)(r4)
-	lwz	r26, VCPU_GPR(r26)(r4)
-	lwz	r27, VCPU_GPR(r27)(r4)
-	lwz	r28, VCPU_GPR(r28)(r4)
-	lwz	r29, VCPU_GPR(r29)(r4)
-	lwz	r30, VCPU_GPR(r30)(r4)
-	lwz	r31, VCPU_GPR(r31)(r4)
+	lwz	r15, VCPU_GPR(R15)(r4)
+	lwz	r16, VCPU_GPR(R16)(r4)
+	lwz	r17, VCPU_GPR(R17)(r4)
+	lwz	r18, VCPU_GPR(R18)(r4)
+	lwz	r19, VCPU_GPR(R19)(r4)
+	lwz	r20, VCPU_GPR(R20)(r4)
+	lwz	r21, VCPU_GPR(R21)(r4)
+	lwz	r22, VCPU_GPR(R22)(r4)
+	lwz	r23, VCPU_GPR(R23)(r4)
+	lwz	r24, VCPU_GPR(R24)(r4)
+	lwz	r25, VCPU_GPR(R25)(r4)
+	lwz	r26, VCPU_GPR(R26)(r4)
+	lwz	r27, VCPU_GPR(R27)(r4)
+	lwz	r28, VCPU_GPR(R28)(r4)
+	lwz	r29, VCPU_GPR(R29)(r4)
+	lwz	r30, VCPU_GPR(R30)(r4)
+	lwz	r31, VCPU_GPR(R31)(r4)
..skip_nv_load:
 
 	/* Should we return to the guest? */
@@ -257,43 +257,43 @@ heavyweight_exit:
 
 	/* We already saved guest volatile register state; now save the
 	 * non-volatiles. */
-	stw	r15, VCPU_GPR(r15)(r4)
-	stw	r16, VCPU_GPR(r16)(r4)
-	stw	r17, VCPU_GPR(r17)(r4)
-	stw	r18, VCPU_GPR(r18)(r4)
-	stw	r19, VCPU_GPR(r19)(r4)
-	stw	r20, VCPU_GPR(r20)(r4)
-	stw	r21, VCPU_GPR(r21)(r4)
-	stw	r22, VCPU_GPR(r22)(r4)
-	stw	r23, VCPU_GPR(r23)(r4)
-	stw	r24, VCPU_GPR(r24)(r4)
-	stw	r25, VCPU_GPR(r25)(r4)
-	stw	r26, VCPU_GPR(r26)(r4)
-	stw	r27, VCPU_GPR(r27)(r4)
-	stw	r28, VCPU_GPR(r28)(r4)
-	stw	r29, VCPU_GPR(r29)(r4)
-	stw	r30, VCPU_GPR(r30)(r4)
-	stw	r31, VCPU_GPR(r31)(r4)
+	stw	r15, VCPU_GPR(R15)(r4)
+	stw	r16, VCPU_GPR(R16)(r4)
+	stw	r17, VCPU_GPR(R17)(r4)
+	stw	r18, VCPU_GPR(R18)(r4)
+	stw	r19, VCPU_GPR(R19)(r4)
+	stw	r20, VCPU_GPR(R20)(r4)
+	stw	r21, VCPU_GPR(R21)(r4)
+	stw	r22, VCPU_GPR(R22)(r4)
+	stw	r23, VCPU_GPR(R23)(r4)
+	stw	r24, VCPU_GPR(R24)(r4)
+	stw	r25, VCPU_GPR(R25)(r4)
+	stw	r26, VCPU_GPR(R26)(r4)
+	stw	r27, VCPU_GPR(R27)(r4)
+	stw	r28, VCPU_GPR(R28)(r4)
+	stw	r29, VCPU_GPR(R29)(r4)
+	stw	r30, VCPU_GPR(R30)(r4)
+	stw	r31, VCPU_GPR(R31)(r4)
 
 	/* Load host non-volatile register state from host stack. */
-	lwz	r14, HOST_NV_GPR(r14)(r1)
-	lwz	r15, HOST_NV_GPR(r15)(r1)
-	lwz	r16, HOST_NV_GPR(r16)(r1)
-	lwz	r17, HOST_NV_GPR(r17)(r1)
-	lwz	r18, HOST_NV_GPR(r18)(r1)
-	lwz	r19, HOST_NV_GPR(r19)(r1)
-	lwz	r20, HOST_NV_GPR(r20)(r1)
-	lwz	r21, HOST_NV_GPR(r21)(r1)
-	lwz	r22, HOST_NV_GPR(r22)(r1)
-	lwz	r23, HOST_NV_GPR(r23)(r1)
-	lwz	r24, HOST_NV_GPR(r24)(r1)
-	lwz	r25, HOST_NV_GPR(r25)(r1)
-	lwz	r26, HOST_NV_GPR(r26)(r1)
-	lwz	r27, HOST_NV_GPR(r27)(r1)
-	lwz	r28, HOST_NV_GPR(r28)(r1)
-	lwz	r29, HOST_NV_GPR(r29)(r1)
-	lwz	r30, HOST_NV_GPR(r30)(r1)
-	lwz	r31, HOST_NV_GPR(r31)(r1)
+	lwz	r14, HOST_NV_GPR(R14)(r1)
+	lwz	r15, HOST_NV_GPR(R15)(r1)
+	lwz	r16, HOST_NV_GPR(R16)(r1)
+	lwz	r17, HOST_NV_GPR(R17)(r1)
+	lwz	r18, HOST_NV_GPR(R18)(r1)
+	lwz	r19, HOST_NV_GPR(R19)(r1)
+	lwz	r20, HOST_NV_GPR(R20)(r1)
+	lwz	r21, HOST_NV_GPR(R21)(r1)
+	lwz	r22, HOST_NV_GPR(R22)(r1)
+	lwz	r23, HOST_NV_GPR(R23)(r1)
+	lwz	r24, HOST_NV_GPR(R24)(r1)
+	lwz	r25, HOST_NV_GPR(R25)(r1)
+	lwz	r26, HOST_NV_GPR(R26)(r1)
+	lwz	r27, HOST_NV_GPR(R27)(r1)
+	lwz	r28, HOST_NV_GPR(R28)(r1)
+	lwz	r29, HOST_NV_GPR(R29)(r1)
+	lwz	r30, HOST_NV_GPR(R30)(r1)
+	lwz	r31, HOST_NV_GPR(R31)(r1)
 
 	/* Return to kvm_vcpu_run(). */
 	lwz	r4, HOST_STACK_LR(r1)
@@ -321,44 +321,44 @@ _GLOBAL(__kvmppc_vcpu_run)
 	stw	r5, HOST_CR(r1)
 
 	/* Save host non-volatile register state to stack. */
-	stw	r14, HOST_NV_GPR(r14)(r1)
-	stw	r15, HOST_NV_GPR(r15)(r1)
-	stw	r16, HOST_NV_GPR(r16)(r1)
-	stw	r17, HOST_NV_GPR(r17)(r1)
-	stw	r18, HOST_NV_GPR(r18)(r1)
-	stw	r19, HOST_NV_GPR(r19)(r1)
-	stw	r20, HOST_NV_GPR(r20)(r1)
-	stw	r21, HOST_NV_GPR(r21)(r1)
-	stw	r22, HOST_NV_GPR(r22)(r1)
-	stw	r23, HOST_NV_GPR(r23)(r1)
-	stw	r24, HOST_NV_GPR(r24)(r1)
-	stw	r25, HOST_NV_GPR(r25)(r1)
-	stw	r26, HOST_NV_GPR(r26)(r1)
-	stw	r27, HOST_NV_GPR(r27)(r1)
-	stw	r28, HOST_NV_GPR(r28)(r1)
-	stw	r29, HOST_NV_GPR(r29)(r1)
-	stw	r30, HOST_NV_GPR(r30)(r1)
-	stw	r31, HOST_NV_GPR(r31)(r1)
+	stw	r14, HOST_NV_GPR(R14)(r1)
+	stw	r15, HOST_NV_GPR(R15)(r1)
+	stw	r16, HOST_NV_GPR(R16)(r1)
+	stw	r17, HOST_NV_GPR(R17)(r1)
+	stw	r18, HOST_NV_GPR(R18)(r1)
+	stw	r19, HOST_NV_GPR(R19)(r1)
+	stw	r20, HOST_NV_GPR(R20)(r1)
+	stw	r21, HOST_NV_GPR(R21)(r1)
+	stw	r22, HOST_NV_GPR(R22)(r1)
+	stw	r23, HOST_NV_GPR(R23)(r1)
+	stw	r24, HOST_NV_GPR(R24)(r1)
+	stw	r25, HOST_NV_GPR(R25)(r1)
+	stw	r26, HOST_NV_GPR(R26)(r1)
+	stw	r27, HOST_NV_GPR(R27)(r1)
+	stw	r28, HOST_NV_GPR(R28)(r1)
+	stw	r29, HOST_NV_GPR(R29)(r1)
+	stw	r30, HOST_NV_GPR(R30)(r1)
+	stw	r31, HOST_NV_GPR(R31)(r1)
 
 	/* Load guest non-volatiles. */
-	lwz	r14, VCPU_GPR(r14)(r4)
-	lwz	r15, VCPU_GPR(r15)(r4)
-	lwz	r16, VCPU_GPR(r16)(r4)
-	lwz	r17, VCPU_GPR(r17)(r4)
-	lwz	r18, VCPU_GPR(r18)(r4)
-	lwz	r19, VCPU_GPR(r19)(r4)
-	lwz	r20, VCPU_GPR(r20)(r4)
-	lwz	r21, VCPU_GPR(r21)(r4)
-	lwz	r22, VCPU_GPR(r22)(r4)
-	lwz	r23, VCPU_GPR(r23)(r4)
-	lwz	r24, VCPU_GPR(r24)(r4)
-	lwz	r25, VCPU_GPR(r25)(r4)
-	lwz	r26, VCPU_GPR(r26)(r4)
-	lwz	r27, VCPU_GPR(r27)(r4)
-	lwz	r28, VCPU_GPR(r28)(r4)
-	lwz	r29, VCPU_GPR(r29)(r4)
-	lwz	r30, VCPU_GPR(r30)(r4)
-	lwz	r31, VCPU_GPR(r31)(r4)
+	lwz	r14, VCPU_GPR(R14)(r4)
+	lwz	r15, VCPU_GPR(R15)(r4)
+	lwz	r16, VCPU_GPR(R16)(r4)
+	lwz	r17, VCPU_GPR(R17)(r4)
+	lwz	r18, VCPU_GPR(R18)(r4)
+	lwz	r19, VCPU_GPR(R19)(r4)
+	lwz	r20, VCPU_GPR(R20)(r4)
+	lwz	r21, VCPU_GPR(R21)(r4)
+	lwz	r22, VCPU_GPR(R22)(r4)
+	lwz	r23, VCPU_GPR(R23)(r4)
+	lwz	r24, VCPU_GPR(R24)(r4)
+	lwz	r25, VCPU_GPR(R25)(r4)
+	lwz	r26, VCPU_GPR(R26)(r4)
+	lwz	r27, VCPU_GPR(R27)(r4)
+	lwz	r28, VCPU_GPR(R28)(r4)
+	lwz	r29, VCPU_GPR(R29)(r4)
+	lwz	r30, VCPU_GPR(R30)(r4)
+	lwz	r31, VCPU_GPR(R31)(r4)
 
#ifdef CONFIG_SPE
 	/* save host SPEFSCR and load guest SPEFSCR */
@@ -386,13 +386,13 @@ lightweight_exit:
#endif
 
 	/* Load some guest volatiles. */
-	lwz	r0, VCPU_GPR(r0)(r4)
-	lwz	r2, VCPU_GPR(r2)(r4)
-	lwz	r9, VCPU_GPR(r9)(r4)
-	lwz	r10, VCPU_GPR(r10)(r4)
-	lwz	r11, VCPU_GPR(r11)(r4)
-	lwz	r12, VCPU_GPR(r12)(r4)
-	lwz	r13, VCPU_GPR(r13)(r4)
+	lwz	r0, VCPU_GPR(R0)(r4)
+	lwz	r2, VCPU_GPR(R2)(r4)
+	lwz	r9, VCPU_GPR(R9)(r4)
+	lwz	r10, VCPU_GPR(R10)(r4)
+	lwz	r11, VCPU_GPR(R11)(r4)
+	lwz	r12, VCPU_GPR(R12)(r4)
+	lwz	r13, VCPU_GPR(R13)(r4)
 	lwz	r3, VCPU_LR(r4)
 	mtlr	r3
 	lwz	r3, VCPU_XER(r4)
@@ -411,7 +411,7 @@ lightweight_exit:
 
 	/* Can't switch the stack pointer until after IVPR is switched,
 	 * because host interrupt handlers would get confused. */
-	lwz	r1, VCPU_GPR(r1)(r4)
+	lwz	r1, VCPU_GPR(R1)(r4)
 
 	/*
 	 * Host interrupt handlers may have clobbered these
@@ -449,10 +449,10 @@ lightweight_exit:
 	mtcr	r5
 	mtsrr0	r6
 	mtsrr1	r7
-	lwz	r5, VCPU_GPR(r5)(r4)
-	lwz	r6, VCPU_GPR(r6)(r4)
-	lwz	r7, VCPU_GPR(r7)(r4)
-	lwz	r8, VCPU_GPR(r8)(r4)
+	lwz	r5, VCPU_GPR(R5)(r4)
+	lwz	r6, VCPU_GPR(R6)(r4)
+	lwz	r7, VCPU_GPR(R7)(r4)
+	lwz	r8, VCPU_GPR(R8)(r4)
 
 	/* Clear any debug events which occurred since we disabled MSR[DE].
 	 * XXX This gives us a 3-instruction window in which a breakpoint
@@ -461,8 +461,8 @@ lightweight_exit:
 	ori	r3, r3, 0xffff
 	mtspr	SPRN_DBSR, r3
 
-	lwz	r3, VCPU_GPR(r3)(r4)
-	lwz	r4, VCPU_GPR(r4)(r4)
+	lwz	r3, VCPU_GPR(R3)(r4)
+	lwz	r4, VCPU_GPR(R4)(r4)
 	rfi
 
#ifdef CONFIG_SPE
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 6048a00515d7..a623b1d32d3e 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -67,15 +67,15 @@
 */
.macro kvm_handler_common intno, srr0, flags
 	/* Restore host stack pointer */
-	PPC_STL	r1, VCPU_GPR(r1)(r4)
-	PPC_STL	r2, VCPU_GPR(r2)(r4)
+	PPC_STL	r1, VCPU_GPR(R1)(r4)
+	PPC_STL	r2, VCPU_GPR(R2)(r4)
 	PPC_LL	r1, VCPU_HOST_STACK(r4)
 	PPC_LL	r2, HOST_R2(r1)
 
 	mfspr	r10, SPRN_PID
 	lwz	r8, VCPU_HOST_PID(r4)
 	PPC_LL	r11, VCPU_SHARED(r4)
-	PPC_STL	r14, VCPU_GPR(r14)(r4) /* We need a non-volatile GPR. */
+	PPC_STL	r14, VCPU_GPR(R14)(r4) /* We need a non-volatile GPR. */
 	li	r14, \intno
 
 	stw	r10, VCPU_GUEST_PID(r4)
@@ -137,27 +137,27 @@
 	 */
 	mfspr	r3, SPRN_EPLC	/* will already have correct ELPID and EGS */
-	PPC_STL	r15, VCPU_GPR(r15)(r4)
-	PPC_STL	r16, VCPU_GPR(r16)(r4)
-	PPC_STL	r17, VCPU_GPR(r17)(r4)
-	PPC_STL	r18, VCPU_GPR(r18)(r4)
-	PPC_STL	r19, VCPU_GPR(r19)(r4)
+	PPC_STL	r15, VCPU_GPR(R15)(r4)
+	PPC_STL	r16, VCPU_GPR(R16)(r4)
+	PPC_STL	r17, VCPU_GPR(R17)(r4)
+	PPC_STL	r18, VCPU_GPR(R18)(r4)
+	PPC_STL	r19, VCPU_GPR(R19)(r4)
 	mr	r8, r3
-	PPC_STL	r20, VCPU_GPR(r20)(r4)
+	PPC_STL	r20, VCPU_GPR(R20)(r4)
 	rlwimi	r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS
-	PPC_STL	r21, VCPU_GPR(r21)(r4)
+	PPC_STL	r21, VCPU_GPR(R21)(r4)
 	rlwimi	r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR
-	PPC_STL	r22, VCPU_GPR(r22)(r4)
+	PPC_STL	r22, VCPU_GPR(R22)(r4)
 	rlwimi	r8, r10, EPC_EPID_SHIFT, EPC_EPID
-	PPC_STL	r23, VCPU_GPR(r23)(r4)
-	PPC_STL	r24, VCPU_GPR(r24)(r4)
-	PPC_STL	r25, VCPU_GPR(r25)(r4)
-	PPC_STL	r26, VCPU_GPR(r26)(r4)
-	PPC_STL	r27, VCPU_GPR(r27)(r4)
-	PPC_STL	r28, VCPU_GPR(r28)(r4)
-	PPC_STL	r29, VCPU_GPR(r29)(r4)
-	PPC_STL	r30, VCPU_GPR(r30)(r4)
-	PPC_STL	r31, VCPU_GPR(r31)(r4)
+	PPC_STL	r23, VCPU_GPR(R23)(r4)
+	PPC_STL	r24, VCPU_GPR(R24)(r4)
+	PPC_STL	r25, VCPU_GPR(R25)(r4)
+	PPC_STL	r26, VCPU_GPR(R26)(r4)
+	PPC_STL	r27, VCPU_GPR(R27)(r4)
+	PPC_STL	r28, VCPU_GPR(R28)(r4)
+	PPC_STL	r29, VCPU_GPR(R29)(r4)
+	PPC_STL	r30, VCPU_GPR(R30)(r4)
+	PPC_STL	r31, VCPU_GPR(R31)(r4)
 	mtspr	SPRN_EPLC, r8
 
 	/* disable preemption, so we are sure we hit the fixup handler */
@@ -211,24 +211,24 @@
.macro kvm_handler intno srr0, srr1, flags
_GLOBAL(kvmppc_handler_\intno\()_\srr1)
 	GET_VCPU(r11, r10)
-	PPC_STL	r3, VCPU_GPR(r3)(r11)
+	PPC_STL	r3, VCPU_GPR(R3)(r11)
 	mfspr	r3, SPRN_SPRG_RSCRATCH0
-	PPC_STL	r4, VCPU_GPR(r4)(r11)
+	PPC_STL	r4, VCPU_GPR(R4)(r11)
 	PPC_LL	r4, THREAD_NORMSAVE(0)(r10)
-	PPC_STL	r5, VCPU_GPR(r5)(r11)
+	PPC_STL	r5, VCPU_GPR(R5)(r11)
 	stw	r13, VCPU_CR(r11)
 	mfspr	r5, \srr0
-	PPC_STL	r3, VCPU_GPR(r10)(r11)
+	PPC_STL	r3, VCPU_GPR(R10)(r11)
 	PPC_LL	r3, THREAD_NORMSAVE(2)(r10)
-	PPC_STL	r6, VCPU_GPR(r6)(r11)
-	PPC_STL	r4, VCPU_GPR(r11)(r11)
+	PPC_STL	r6, VCPU_GPR(R6)(r11)
+	PPC_STL	r4, VCPU_GPR(R11)(r11)
 	mfspr	r6, \srr1
-	PPC_STL	r7, VCPU_GPR(r7)(r11)
-	PPC_STL	r8, VCPU_GPR(r8)(r11)
-	PPC_STL	r9, VCPU_GPR(r9)(r11)
-	PPC_STL	r3, VCPU_GPR(r13)(r11)
+	PPC_STL	r7, VCPU_GPR(R7)(r11)
+	PPC_STL	r8, VCPU_GPR(R8)(r11)
+	PPC_STL	r9, VCPU_GPR(R9)(r11)
+	PPC_STL	r3, VCPU_GPR(R13)(r11)
 	mfctr	r7
-	PPC_STL	r12, VCPU_GPR(r12)(r11)
+	PPC_STL	r12, VCPU_GPR(R12)(r11)
 	PPC_STL	r7, VCPU_CTR(r11)
 	mr	r4, r11
 	kvm_handler_common \intno, \srr0, \flags
@@ -238,25 +238,25 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
_GLOBAL(kvmppc_handler_\intno\()_\srr1)
 	mfspr	r10, SPRN_SPRG_THREAD
 	GET_VCPU(r11, r10)
-	PPC_STL	r3, VCPU_GPR(r3)(r11)
+	PPC_STL	r3, VCPU_GPR(R3)(r11)
 	mfspr	r3, \scratch
-	PPC_STL	r4, VCPU_GPR(r4)(r11)
+	PPC_STL	r4, VCPU_GPR(R4)(r11)
 	PPC_LL	r4, GPR9(r8)
-	PPC_STL	r5, VCPU_GPR(r5)(r11)
+	PPC_STL	r5, VCPU_GPR(R5)(r11)
 	stw	r9, VCPU_CR(r11)
 	mfspr	r5, \srr0
-	PPC_STL	r3, VCPU_GPR(r8)(r11)
+	PPC_STL	r3, VCPU_GPR(R8)(r11)
 	PPC_LL	r3, GPR10(r8)
-	PPC_STL	r6, VCPU_GPR(r6)(r11)
-	PPC_STL	r4, VCPU_GPR(r9)(r11)
+	PPC_STL	r6, VCPU_GPR(R6)(r11)
+	PPC_STL	r4, VCPU_GPR(R9)(r11)
 	mfspr	r6, \srr1
 	PPC_LL	r4, GPR11(r8)
-	PPC_STL	r7, VCPU_GPR(r7)(r11)
-	PPC_STL	r3, VCPU_GPR(r10)(r11)
+	PPC_STL	r7, VCPU_GPR(R7)(r11)
+	PPC_STL	r3, VCPU_GPR(R10)(r11)
 	mfctr	r7
-	PPC_STL	r12, VCPU_GPR(r12)(r11)
-	PPC_STL	r13, VCPU_GPR(r13)(r11)
-	PPC_STL	r4, VCPU_GPR(r11)(r11)
+	PPC_STL	r12, VCPU_GPR(R12)(r11)
+	PPC_STL	r13, VCPU_GPR(R13)(r11)
+	PPC_STL	r4, VCPU_GPR(R11)(r11)
 	PPC_STL	r7, VCPU_CTR(r11)
 	mr	r4, r11
 	kvm_handler_common \intno, \srr0, \flags
@@ -310,7 +310,7 @@ kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
_GLOBAL(kvmppc_resume_host)
 	/* Save remaining volatile guest register state to vcpu. */
 	mfspr	r3, SPRN_VRSAVE
-	PPC_STL	r0, VCPU_GPR(r0)(r4)
+	PPC_STL	r0, VCPU_GPR(R0)(r4)
 	mflr	r5
 	mfspr	r6, SPRN_SPRG4
 	PPC_STL	r5, VCPU_LR(r4)
@@ -358,27 +358,27 @@ _GLOBAL(kvmppc_resume_host)
 
 	/* Restore vcpu pointer and the nonvolatiles we used. */
 	mr	r4, r14
-	PPC_LL	r14, VCPU_GPR(r14)(r4)
+	PPC_LL	r14, VCPU_GPR(R14)(r4)
 
 	andi.	r5, r3, RESUME_FLAG_NV
 	beq	skip_nv_load
-	PPC_LL	r15, VCPU_GPR(r15)(r4)
-	PPC_LL	r16, VCPU_GPR(r16)(r4)
-	PPC_LL	r17, VCPU_GPR(r17)(r4)
-	PPC_LL	r18, VCPU_GPR(r18)(r4)
-	PPC_LL	r19, VCPU_GPR(r19)(r4)
-	PPC_LL	r20, VCPU_GPR(r20)(r4)
-	PPC_LL	r21, VCPU_GPR(r21)(r4)
-	PPC_LL	r22, VCPU_GPR(r22)(r4)
-	PPC_LL	r23, VCPU_GPR(r23)(r4)
-	PPC_LL	r24, VCPU_GPR(r24)(r4)
-	PPC_LL	r25, VCPU_GPR(r25)(r4)
-	PPC_LL	r26, VCPU_GPR(r26)(r4)
-	PPC_LL	r27, VCPU_GPR(r27)(r4)
-	PPC_LL	r28, VCPU_GPR(r28)(r4)
-	PPC_LL	r29, VCPU_GPR(r29)(r4)
-	PPC_LL	r30, VCPU_GPR(r30)(r4)
-	PPC_LL	r31, VCPU_GPR(r31)(r4)
+	PPC_LL	r15, VCPU_GPR(R15)(r4)
+	PPC_LL	r16, VCPU_GPR(R16)(r4)
+	PPC_LL	r17, VCPU_GPR(R17)(r4)
+	PPC_LL	r18, VCPU_GPR(R18)(r4)
+	PPC_LL	r19, VCPU_GPR(R19)(r4)
+	PPC_LL	r20, VCPU_GPR(R20)(r4)
+	PPC_LL	r21, VCPU_GPR(R21)(r4)
+	PPC_LL	r22, VCPU_GPR(R22)(r4)
+	PPC_LL	r23, VCPU_GPR(R23)(r4)
+	PPC_LL	r24, VCPU_GPR(R24)(r4)
+	PPC_LL	r25, VCPU_GPR(R25)(r4)
+	PPC_LL	r26, VCPU_GPR(R26)(r4)
+	PPC_LL	r27, VCPU_GPR(R27)(r4)
+	PPC_LL	r28, VCPU_GPR(R28)(r4)
+	PPC_LL	r29, VCPU_GPR(R29)(r4)
+	PPC_LL	r30, VCPU_GPR(R30)(r4)
+	PPC_LL	r31, VCPU_GPR(R31)(r4)
skip_nv_load:
 	/* Should we return to the guest? */
 	andi.	r5, r3, RESUME_FLAG_HOST
@@ -396,23 +396,23 @@ heavyweight_exit:
 	 * non-volatiles. */
-	PPC_STL	r15, VCPU_GPR(r15)(r4)
-	PPC_STL	r16, VCPU_GPR(r16)(r4)
-	PPC_STL	r17, VCPU_GPR(r17)(r4)
-	PPC_STL	r18, VCPU_GPR(r18)(r4)
-	PPC_STL	r19, VCPU_GPR(r19)(r4)
-	PPC_STL	r20, VCPU_GPR(r20)(r4)
-	PPC_STL	r21, VCPU_GPR(r21)(r4)
-	PPC_STL	r22, VCPU_GPR(r22)(r4)
-	PPC_STL	r23, VCPU_GPR(r23)(r4)
-	PPC_STL	r24, VCPU_GPR(r24)(r4)
-	PPC_STL	r25, VCPU_GPR(r25)(r4)
-	PPC_STL	r26, VCPU_GPR(r26)(r4)
-	PPC_STL	r27, VCPU_GPR(r27)(r4)
-	PPC_STL	r28, VCPU_GPR(r28)(r4)
-	PPC_STL	r29, VCPU_GPR(r29)(r4)
-	PPC_STL	r30, VCPU_GPR(r30)(r4)
-	PPC_STL	r31, VCPU_GPR(r31)(r4)
+	PPC_STL	r15, VCPU_GPR(R15)(r4)
+	PPC_STL	r16, VCPU_GPR(R16)(r4)
+	PPC_STL	r17, VCPU_GPR(R17)(r4)
+	PPC_STL	r18, VCPU_GPR(R18)(r4)
+	PPC_STL	r19, VCPU_GPR(R19)(r4)
+	PPC_STL	r20, VCPU_GPR(R20)(r4)
+	PPC_STL	r21, VCPU_GPR(R21)(r4)
+	PPC_STL	r22, VCPU_GPR(R22)(r4)
+	PPC_STL	r23, VCPU_GPR(R23)(r4)
+	PPC_STL	r24, VCPU_GPR(R24)(r4)
+	PPC_STL	r25, VCPU_GPR(R25)(r4)
+	PPC_STL	r26, VCPU_GPR(R26)(r4)
+	PPC_STL	r27, VCPU_GPR(R27)(r4)
+	PPC_STL	r28, VCPU_GPR(R28)(r4)
+	PPC_STL	r29, VCPU_GPR(R29)(r4)
+	PPC_STL	r30, VCPU_GPR(R30)(r4)
+	PPC_STL	r31, VCPU_GPR(R31)(r4)
 
 	/* Load host non-volatile register state from host stack. */
 	PPC_LL	r14, HOST_NV_GPR(r14)(r1)
@@ -478,24 +478,24 @@ _GLOBAL(__kvmppc_vcpu_run)
 	PPC_STL	r31, HOST_NV_GPR(r31)(r1)
 
 	/* Load guest non-volatiles. */
-	PPC_LL	r14, VCPU_GPR(r14)(r4)
-	PPC_LL	r15, VCPU_GPR(r15)(r4)
-	PPC_LL	r16, VCPU_GPR(r16)(r4)
-	PPC_LL	r17, VCPU_GPR(r17)(r4)
-	PPC_LL	r18, VCPU_GPR(r18)(r4)
-	PPC_LL	r19, VCPU_GPR(r19)(r4)
-	PPC_LL	r20, VCPU_GPR(r20)(r4)
-	PPC_LL	r21, VCPU_GPR(r21)(r4)
-	PPC_LL	r22, VCPU_GPR(r22)(r4)
-	PPC_LL	r23, VCPU_GPR(r23)(r4)
-	PPC_LL	r24, VCPU_GPR(r24)(r4)
-	PPC_LL	r25, VCPU_GPR(r25)(r4)
-	PPC_LL	r26, VCPU_GPR(r26)(r4)
-	PPC_LL	r27, VCPU_GPR(r27)(r4)
-	PPC_LL	r28, VCPU_GPR(r28)(r4)
-	PPC_LL	r29, VCPU_GPR(r29)(r4)
-	PPC_LL	r30, VCPU_GPR(r30)(r4)
-	PPC_LL	r31, VCPU_GPR(r31)(r4)
+	PPC_LL	r14, VCPU_GPR(R14)(r4)
+	PPC_LL	r15, VCPU_GPR(R15)(r4)
+	PPC_LL	r16, VCPU_GPR(R16)(r4)
+	PPC_LL	r17, VCPU_GPR(R17)(r4)
+	PPC_LL	r18, VCPU_GPR(R18)(r4)
+	PPC_LL	r19, VCPU_GPR(R19)(r4)
+	PPC_LL	r20, VCPU_GPR(R20)(r4)
+	PPC_LL	r21, VCPU_GPR(R21)(r4)
+	PPC_LL	r22, VCPU_GPR(R22)(r4)
+	PPC_LL	r23, VCPU_GPR(R23)(r4)
+	PPC_LL	r24, VCPU_GPR(R24)(r4)
+	PPC_LL	r25, VCPU_GPR(R25)(r4)
+	PPC_LL	r26, VCPU_GPR(R26)(r4)
+	PPC_LL	r27, VCPU_GPR(R27)(r4)
+	PPC_LL	r28, VCPU_GPR(R28)(r4)
+	PPC_LL	r29, VCPU_GPR(R29)(r4)
+	PPC_LL	r30, VCPU_GPR(R30)(r4)
+	PPC_LL	r31, VCPU_GPR(R31)(r4)
 
lightweight_exit:
 
@@ -554,13 +554,13 @@ lightweight_exit:
 	lwz	r7, VCPU_CR(r4)
 	PPC_LL	r8, VCPU_PC(r4)
 	PPC_LD(r9, VCPU_SHARED_MSR, r11)
-	PPC_LL	r0, VCPU_GPR(r0)(r4)
-	PPC_LL	r1, VCPU_GPR(r1)(r4)
-	PPC_LL	r2, VCPU_GPR(r2)(r4)
-	PPC_LL	r10, VCPU_GPR(r10)(r4)
-	PPC_LL	r11, VCPU_GPR(r11)(r4)
-	PPC_LL	r12, VCPU_GPR(r12)(r4)
-	PPC_LL	r13, VCPU_GPR(r13)(r4)
+	PPC_LL	r0, VCPU_GPR(R0)(r4)
+	PPC_LL	r1, VCPU_GPR(R1)(r4)
+	PPC_LL	r2, VCPU_GPR(R2)(r4)
+	PPC_LL	r10, VCPU_GPR(R10)(r4)
+	PPC_LL	r11, VCPU_GPR(R11)(r4)
+	PPC_LL	r12, VCPU_GPR(R12)(r4)
+	PPC_LL	r13, VCPU_GPR(R13)(r4)
 	mtlr	r3
 	mtxer	r5
 	mtctr	r6
@@ -586,12 +586,12 @@ lightweight_exit:
 	mtcr	r7
 
 	/* Finish loading guest volatiles and jump to guest. */
-	PPC_LL	r5, VCPU_GPR(r5)(r4)
-	PPC_LL	r6, VCPU_GPR(r6)(r4)
-	PPC_LL	r7, VCPU_GPR(r7)(r4)
-	PPC_LL	r8, VCPU_GPR(r8)(r4)
-	PPC_LL	r9, VCPU_GPR(r9)(r4)
-
-	PPC_LL	r3, VCPU_GPR(r3)(r4)
-	PPC_LL	r4, VCPU_GPR(r4)(r4)
+	PPC_LL	r5, VCPU_GPR(R5)(r4)
+	PPC_LL	r6, VCPU_GPR(R6)(r4)
+	PPC_LL	r7, VCPU_GPR(R7)(r4)
+	PPC_LL	r8, VCPU_GPR(R8)(r4)
+	PPC_LL	r9, VCPU_GPR(R9)(r4)
+
+	PPC_LL	r3, VCPU_GPR(R3)(r4)
+	PPC_LL	r4, VCPU_GPR(R4)(r4)
 	rfi
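As a quick sanity check of the booke stack-frame arithmetic, using only the defines visible in the diff itself (HOST_NV_GPRS = 20, 4-byte slots starting at r14), here is a worked expansion; the claim about register tokens is an assumption about the motivation, not stated in this commit:

	/* HOST_NV_GPR(R31)    = 20 + (31 - 14) * 4     = 88
	 * HOST_MIN_STACK_SIZE = 88 + 4                 = 92
	 * HOST_STACK_SIZE     = ((92 + 15) / 16) * 16  = 96  (16-byte aligned)
	 *
	 * The substitution is safe because R31 still expands to the number
	 * 31; only the spelling of the macro argument changes, no offset
	 * moves.  Had r31 instead expanded to a register token, this
	 * preprocessor arithmetic would fail. */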