author     Linus Torvalds <torvalds@linux-foundation.org>  2008-12-02 15:56:17 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-12-02 15:56:17 -0800
commit     b7d626606201c397319b40721ca558b7e54040d5
tree       465c4004c514c3d47f4f7f60bed99cb19b8e5ec8
parent     e6d9f0fb5f9ceca5f4945f12cd3e71efd3382c13
parent     6c475352e87224a8f0b8cc6f6cc96b30563dc5b4
Merge branch 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm
* 'kvm-updates/2.6.28' of git://git.kernel.org/pub/scm/linux/kernel/git/avi/kvm:
KVM: MMU: avoid creation of unreachable pages in the shadow
KVM: ppc: stop leaking host memory on VM exit
KVM: MMU: fix sync of ptes addressed at owner pagetable
KVM: ia64: Fix: Use correct calling convention for PAL_VPS_RESUME_HANDLER
KVM: ia64: Fix incorrect kbuild CFLAGS override
KVM: VMX: Fix interrupt loss during race with NMI
KVM: s390: Fix problem state handling in guest sigp handler
-rw-r--r--  arch/ia64/kvm/Makefile               |  2
-rw-r--r--  arch/ia64/kvm/optvfault.S            | 11
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h   |  2
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c           |  8
-rw-r--r--  arch/powerpc/kvm/powerpc.c           |  1
-rw-r--r--  arch/s390/kvm/sigp.c                 |  5
-rw-r--r--  arch/x86/kvm/mmu.c                   |  2
-rw-r--r--  arch/x86/kvm/paging_tmpl.h           |  1
-rw-r--r--  arch/x86/kvm/vmx.c                   |  4
9 files changed, 29 insertions(+), 7 deletions(-)
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
index 3ab4d6d50704..92cef66ca268 100644
--- a/arch/ia64/kvm/Makefile
+++ b/arch/ia64/kvm/Makefile
@@ -58,7 +58,7 @@ endif
 kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
 obj-$(CONFIG_KVM) += kvm.o
 
-EXTRA_CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127
+CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127
 kvm-intel-objs = vmm.o vmm_ivt.o trampoline.o vcpu.o optvfault.o mmio.o \
 	vtlb.o process.o
 #Add link memcpy and memset to avoid possible structure assignment error
diff --git a/arch/ia64/kvm/optvfault.S b/arch/ia64/kvm/optvfault.S
index 634abad979b5..32254ce9a1bd 100644
--- a/arch/ia64/kvm/optvfault.S
+++ b/arch/ia64/kvm/optvfault.S
@@ -107,10 +107,10 @@ END(kvm_vps_resume_normal)
 GLOBAL_ENTRY(kvm_vps_resume_handler)
 	movl r30 = PAL_VPS_RESUME_HANDLER
 	;;
-	ld8 r27=[r25]
+	ld8 r26=[r25]
 	shr r17=r17,IA64_ISR_IR_BIT
 	;;
-	dep r27=r17,r27,63,1 // bit 63 of r27 indicate whether enable CFLE
+	dep r26=r17,r26,63,1 // bit 63 of r26 indicate whether enable CFLE
 	mov pr=r23,-2
 	br.sptk.many kvm_vps_entry
 END(kvm_vps_resume_handler)
@@ -894,12 +894,15 @@ ENTRY(kvm_resume_to_guest)
 	;;
 	ld8 r19=[r19]
 	mov b0=r29
-	cmp.ne p6,p7 = r0,r0
+	mov r27=cr.isr
 	;;
-	tbit.z p6,p7 = r19,IA64_PSR_IC_BIT	// p1=vpsr.ic
+	tbit.z p6,p7 = r19,IA64_PSR_IC_BIT	// p7=vpsr.ic
+	shr r27=r27,IA64_ISR_IR_BIT
 	;;
 (p6)	ld8 r26=[r25]
 (p7)	mov b0=r28
+	;;
+(p6)	dep r26=r27,r26,63,1
 	mov pr=r31,-2
 	br.sptk.many b0             // call pal service
 	;;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 8931ba729d2b..bb62ad876de3 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -104,4 +104,6 @@ static inline void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
 	}
 }
 
+extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
+
 #endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 2e227a412bc2..ad72c6f9811f 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -124,6 +124,14 @@ static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
 	}
 }
 
+void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu)
+{
+	int i;
+
+	for (i = 0; i <= tlb_44x_hwater; i++)
+		kvmppc_44x_shadow_release(vcpu, i);
+}
+
 void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
 {
 	vcpu->arch.shadow_tlb_mod[i] = 1;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 90a6fc422b23..fda9baada132 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -238,6 +238,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
+	kvmppc_core_destroy_mmu(vcpu);
 }
 
 /* Note: clearing MSR[DE] just means that the debug interrupt will not be
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 170392687ce0..2a01b9e02801 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -237,6 +237,11 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
 	u8 order_code;
 	int rc;
 
+	/* sigp in userspace can exit */
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu,
+						   PGM_PRIVILEGED_OPERATION);
+
 	order_code = disp2;
 	if (base2)
 		order_code += vcpu->arch.guest_gprs[base2];
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f1983d9477cd..410ddbc1aa2e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1038,13 +1038,13 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	}
 
 	rmap_write_protect(vcpu->kvm, sp->gfn);
+	kvm_unlink_unsync_page(vcpu->kvm, sp);
 	if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
 		kvm_mmu_zap_page(vcpu->kvm, sp);
 		return 1;
 	}
 
 	kvm_mmu_flush_tlb(vcpu);
-	kvm_unlink_unsync_page(vcpu->kvm, sp);
 	return 0;
 }
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 613ec9aa674a..84eee43bbe74 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -331,6 +331,7 @@ static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
 		r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2],
 					  &curr_pte, sizeof(curr_pte));
 		if (r || curr_pte != gw->ptes[level - 2]) {
+			kvm_mmu_put_page(shadow_page, sptep);
 			kvm_release_pfn_clean(sw->pfn);
 			sw->sptep = NULL;
 			return 1;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d06b4dc0e2ea..a4018b01e1f9 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3149,7 +3149,9 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
 
 	if (cpu_has_virtual_nmis()) {
 		if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
-			if (vmx_nmi_enabled(vcpu)) {
+			if (vcpu->arch.interrupt.pending) {
+				enable_nmi_window(vcpu);
+			} else if (vmx_nmi_enabled(vcpu)) {
 				vcpu->arch.nmi_pending = false;
 				vcpu->arch.nmi_injected = true;
 			} else {
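The vmx.c hunk above is the subtle one in this merge: when an external interrupt is already queued for injection, injecting the pending NMI on the same entry could clobber it, so the fix defers the NMI by requesting an NMI-window exit. The following standalone sketch models only that decision; the struct, field names, and helpers are simplified stand-ins of my own, not the kernel's types or API.

/*
 * Simplified model of the decision made in vmx_intr_assist() above.
 * Illustrative only: state is collapsed into a handful of booleans.
 */
#include <stdbool.h>
#include <stdio.h>

struct vcpu_model {
	bool nmi_pending;        /* an NMI is waiting to be delivered        */
	bool nmi_injected;       /* an NMI injection is already in flight    */
	bool interrupt_pending;  /* an external interrupt is queued          */
	bool nmi_window_open;    /* guest could take an NMI right now        */
	bool nmi_window_req;     /* we asked to exit once the window opens   */
};

static void intr_assist(struct vcpu_model *v)
{
	if (v->nmi_pending && !v->nmi_injected) {
		if (v->interrupt_pending) {
			/* Let the queued interrupt go in on this entry;
			 * keep the NMI pending and ask to be notified
			 * when it can be injected safely. */
			v->nmi_window_req = true;
		} else if (v->nmi_window_open) {
			v->nmi_pending = false;
			v->nmi_injected = true;
		} else {
			v->nmi_window_req = true;
		}
	}
}

int main(void)
{
	struct vcpu_model v = {
		.nmi_pending = true,
		.interrupt_pending = true,
		.nmi_window_open = true,
	};

	intr_assist(&v);
	/* The interrupt is not clobbered and the NMI stays queued. */
	printf("nmi_pending=%d interrupt_pending=%d nmi_window_req=%d\n",
	       v.nmi_pending, v.interrupt_pending, v.nmi_window_req);
	return 0;
}

Without the interrupt_pending check, the same entry would mark the NMI as injected while the interrupt was also queued, which is the race the "Fix interrupt loss during race with NMI" commit addresses.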