Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h    | 1 +
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c      | 1 +
-rw-r--r--  arch/powerpc/kvm/bookehv_interrupts.S  | 4 ++++
3 files changed, 6 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 855ba4d9539d..562f685d1678 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -587,6 +587,7 @@ struct kvm_vcpu_arch {
u32 mmucfg;
u32 eptcfg;
u32 epr;
+ u64 sprg9;
u32 pwrmgtcr0;
u32 crit_save;
/* guest debug registers*/
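
[Editorial note] The hunk above adds a 64-bit sprg9 slot to struct kvm_vcpu_arch so the guest's SPRG9 can be kept per vCPU while the host runs. A minimal userspace sketch of how such state is typically read back through KVM's ONE_REG ioctl follows; the KVM_REG_PPC_SPRG9 id is an assumption (it is not part of the hunk shown), while struct kvm_one_reg and KVM_GET_ONE_REG are the standard KVM interface.

/*
 * Hedged sketch, not part of this patch: fetch the guest's SPRG9 from a
 * vcpu fd via KVM_GET_ONE_REG.  KVM_REG_PPC_SPRG9 is assumed to be the
 * one-reg id that accompanies the new arch.sprg9 field.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int read_guest_sprg9(int vcpu_fd, uint64_t *out)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_SPRG9,	/* assumed register id */
		.addr = (uintptr_t)out,		/* KVM writes the 64-bit value here */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}
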
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 17ffcb4f27f9..ab9ae0411e8f 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -668,6 +668,7 @@ int main(void)
DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
+ DEFINE(VCPU_SPRG9, offsetof(struct kvm_vcpu, arch.sprg9));
DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
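
[Editorial note] The asm-offsets.c change is what lets the assembly below reach the new field: DEFINE() emits the offsetof() result into the generated assembly, and the build scrapes it into an assembler constant, so bookehv_interrupts.S can use VCPU_SPRG9 as a displacement off the vcpu pointer. A toy illustration of the technique follows, using a stand-in struct rather than the real struct kvm_vcpu; it is meant to be compiled to assembly (e.g. gcc -S), not executed.

/*
 * Hedged toy version of the asm-offsets technique (not kernel code).
 * Compiling with -S makes the .ascii marker below appear in the generated
 * assembly, where a build step can turn it into "#define TOY_VCPU_SPRG9 <n>".
 */
#include <stddef.h>

struct toy_vcpu_arch {
	unsigned int       epr;
	unsigned long long sprg9;
	unsigned int       pwrmgtcr0;
};

/* Embed "->SYM <value>" as text in the assembly output. */
#define TOY_DEFINE(sym, val) \
	__asm__ volatile("\n.ascii \"->" #sym " %0\"" : : "i" (val))

void toy_asm_offsets(void)
{
	TOY_DEFINE(TOY_VCPU_SPRG9, offsetof(struct toy_vcpu_arch, sprg9));
}
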
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index e000b397ece3..b4f8fbafee7a 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -398,6 +398,7 @@ _GLOBAL(kvmppc_resume_host)
#ifdef CONFIG_64BIT
PPC_LL r3, PACA_SPRG_VDSO(r13)
#endif
+ mfspr r5, SPRN_SPRG9
PPC_STD(r6, VCPU_SHARED_SPRG4, r11)
mfspr r8, SPRN_SPRG6
PPC_STD(r7, VCPU_SHARED_SPRG5, r11)
@@ -405,6 +406,7 @@ _GLOBAL(kvmppc_resume_host)
#ifdef CONFIG_64BIT
mtspr SPRN_SPRG_VDSO_WRITE, r3
#endif
+ PPC_STD(r5, VCPU_SPRG9, r4)
PPC_STD(r8, VCPU_SHARED_SPRG6, r11)
mfxer r3
PPC_STD(r9, VCPU_SHARED_SPRG7, r11)
@@ -639,7 +641,9 @@ lightweight_exit:
mtspr SPRN_SPRG5W, r6
PPC_LD(r8, VCPU_SHARED_SPRG7, r11)
mtspr SPRN_SPRG6W, r7
+ PPC_LD(r5, VCPU_SPRG9, r4)
mtspr SPRN_SPRG7W, r8
+ mtspr SPRN_SPRG9, r5
/* Load some guest volatiles. */
PPC_LL r3, VCPU_LR(r4)
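
[Editorial note] Taken together, the bookehv_interrupts.S hunks form a save/restore pair: on the host-resume path the guest's SPRG9 is read with mfspr and stored at VCPU_SPRG9(r4), and on the lightweight_exit re-entry path it is loaded back and written with mtspr, so the guest value survives whatever the host does with the register in between. A hedged C-level model of that pairing follows; the mfspr_sprg9()/mtspr_sprg9() helpers are hypothetical stand-ins for the mfspr/mtspr SPRN_SPRG9 instructions, and the authoritative code is the assembly above.

/* Illustration only; the real implementation is the assembly in this diff. */
#include <stdint.h>

struct toy_vcpu { uint64_t sprg9; };

uint64_t mfspr_sprg9(void);		/* hypothetical: mfspr rX, SPRN_SPRG9 */
void mtspr_sprg9(uint64_t val);		/* hypothetical: mtspr SPRN_SPRG9, rX */

/* Exit to host: park the guest's SPRG9 in the vcpu. */
void save_guest_sprg9(struct toy_vcpu *vcpu)
{
	vcpu->sprg9 = mfspr_sprg9();
}

/* lightweight_exit: hand the saved value back to the hardware register. */
void restore_guest_sprg9(struct toy_vcpu *vcpu)
{
	mtspr_sprg9(vcpu->sprg9);
}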