path: root/arch/powerpc/kernel
author    Alexander Graf <agraf@suse.de>            2010-01-08 02:58:06 +0100
committer Marcelo Tosatti <mtosatti@redhat.com>     2010-03-01 12:35:49 -0300
commit    021ec9c69f8b7b20f46296cc76cc4cb341b25191
tree      304f086761e7c01fb412c8319b89ff8b6fb2dde7 /arch/powerpc/kernel
parent    bc90923e27908ef65aa8aaad2f234e18b5273c78
KVM: PPC: Call SLB patching code in interrupt safe manner
Currently we're racy when doing the transition from IR=1 to IR=0: from
the module memory entry code to the real mode SLB switching code.

To work around that I took a look at the RTAS entry code, which faces a
similar problem, and did the same thing: a small helper in linear mapped
memory that does mtmsr with IR=0 and then RFIs into the actual handler.

Thanks to that trick we can safely take page faults in the entry code
and only need to be really careful in the SLB switching part itself.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
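For context, a minimal sketch of what such a linear-mapped helper can look
like. This is illustrative only, not the patch's verbatim code: the label
is suggested by the new arch.rmcall field below, and the register
assignments (r3 = target entry point, r4 = MSR with IR/DR cleared) are
assumptions.

    /* Hedged sketch: runs from linear mapped (real-mode addressable)
     * memory, so it stays reachable once translation goes off. */
    _GLOBAL(kvmppc_rmcall)
            mtmsr   r4      /* IR=0: the SRR0/SRR1 writes below can no
                               longer be clobbered by a page fault */
            mtsrr0  r3      /* SRR0 = real mode handler entry point */
            mtsrr1  r4      /* SRR1 = MSR the handler will run with */
            RFI             /* branch to SRR0 with MSR = SRR1, atomically */

The point of the RFI is that the branch and the MSR update happen as one
atomic operation, which is exactly the window the IR=1 to IR=0 transition
needs to close.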
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 1501e77c980c..ee9935442f0e 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -214,8 +214,6 @@ int main(void)
DEFINE(PACA_KVM_HOST_R2, offsetof(struct paca_struct, shadow_vcpu.host_r2));
DEFINE(PACA_KVM_VMHANDLER, offsetof(struct paca_struct,
shadow_vcpu.vmhandler));
- DEFINE(PACA_KVM_RMHANDLER, offsetof(struct paca_struct,
- shadow_vcpu.rmhandler));
DEFINE(PACA_KVM_SCRATCH0, offsetof(struct paca_struct,
shadow_vcpu.scratch0));
DEFINE(PACA_KVM_SCRATCH1, offsetof(struct paca_struct,
@@ -438,6 +436,7 @@ int main(void)
DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
+ DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
#else
DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
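The DEFINEs in asm-offsets.c only emit numeric structure offsets as
assembler constants, so .S code can reach C struct fields without knowing
their layout. A hedged usage sketch of how the new VCPU_RMCALL constant
would be consumed from assembly; the register choices here are
assumptions, not the patch's actual entry code:

    /* Hedged sketch: r7 holds the vcpu pointer; fetch the linear-mapped
     * helper's address from arch.rmcall and branch to it via CTR. */
            ld      r6, VCPU_RMCALL(r7)     /* load vcpu->arch.rmcall */
            mtctr   r6                      /* branch target into CTR */
            bctr                            /* enter the rmcall helper */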