Diffstat (limited to 'arch/powerpc/kvm/book3s.c')
-rw-r--r--	arch/powerpc/kvm/book3s.c	309
1 file changed, 262 insertions(+), 47 deletions(-)
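
[Editor's note] The patch below teaches Book3S KVM to share the FPU, Altivec and VSX units lazily between host and guest: a facility is loaded for the guest only on its first *_UNAVAIL exit (kvmppc_handle_ext) and is flushed back when the host reclaims it (kvmppc_giveup_ext), with vcpu->arch.guest_owned_ext tracking current ownership. The standalone C sketch below models just that ownership protocol; the MSR bit values are the standard PowerPC ones, but the function bodies and printouts are illustrative assumptions, not code from this patch.

/* Illustration only -- not part of the patch. */
#include <stdio.h>

#define MSR_FP  0x00002000UL
#define MSR_VEC 0x02000000UL
#define MSR_VSX 0x00800000UL

static unsigned long guest_owned_ext;		/* facilities the guest owns */
static unsigned long guest_msr = MSR_FP;	/* guest enabled FP only */

/* Modeled on kvmppc_handle_ext(): on a *_UNAVAIL exit, either reflect
 * the interrupt into the guest or load the facility and take ownership. */
static void handle_ext(unsigned long msr)
{
	if (!(guest_msr & msr)) {
		printf("0x%08lx: reflect unavailable-interrupt to guest\n", msr);
		return;
	}
	guest_owned_ext |= msr;
	printf("0x%08lx: load guest state, guest now owns it\n", msr);
}

/* Modeled on kvmppc_giveup_ext(): flush guest state back on vcpu_put
 * or after a heavyweight exit. No-op unless the guest owns the unit. */
static void giveup_ext(unsigned long msr)
{
	if (!(guest_owned_ext & msr))
		return;
	guest_owned_ext &= ~msr;
	printf("0x%08lx: save guest state, host owns it again\n", msr);
}

int main(void)
{
	handle_ext(MSR_FP);	/* first FP use: loaded lazily        */
	handle_ext(MSR_VEC);	/* guest MSR.VEC clear: interrupt     */
	giveup_ext(MSR_FP);	/* heavyweight exit: FP state flushed */
	giveup_ext(MSR_VSX);	/* never owned: nothing to do         */
	return 0;
}
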
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 3e294bd9b8c6..9a271f0929c7 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -33,12 +33,9 @@
 /* #define EXIT_DEBUG */
 /* #define EXIT_DEBUG_SIMPLE */
+/* #define DEBUG_EXT */
 
-/* Without AGGRESSIVE_DEC we only fire off a DEC interrupt when DEC turns 0.
- * When set, we retrigger a DEC interrupt after that if DEC <= 0.
- * PPC32 Linux runs faster without AGGRESSIVE_DEC, PPC64 Linux requires it. */
-
-/* #define AGGRESSIVE_DEC */
+static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "exits",       VCPU_STAT(sum_exits) },
@@ -72,16 +69,24 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow, sizeof(get_paca()->kvm_slb));
+	memcpy(&get_paca()->shadow_vcpu, &to_book3s(vcpu)->shadow_vcpu,
+	       sizeof(get_paca()->shadow_vcpu));
 	get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max;
 }
 
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb, sizeof(get_paca()->kvm_slb));
+	memcpy(&to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
+	       sizeof(get_paca()->shadow_vcpu));
 	to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max;
+
+	kvmppc_giveup_ext(vcpu, MSR_FP);
+	kvmppc_giveup_ext(vcpu, MSR_VEC);
+	kvmppc_giveup_ext(vcpu, MSR_VSX);
 }
 
-#if defined(AGGRESSIVE_DEC) || defined(EXIT_DEBUG)
+#if defined(EXIT_DEBUG)
 static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
 {
 	u64 jd = mftb() - vcpu->arch.dec_jiffies;
@@ -89,6 +94,23 @@ static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
 }
 #endif
 
+static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.shadow_msr = vcpu->arch.msr;
+	/* Guest MSR values */
+	vcpu->arch.shadow_msr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE |
+				 MSR_BE | MSR_DE;
+	/* Process MSR values */
+	vcpu->arch.shadow_msr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR |
+				 MSR_EE;
+	/* External providers the guest reserved */
+	vcpu->arch.shadow_msr |= (vcpu->arch.msr & vcpu->arch.guest_owned_ext);
+	/* 64-bit Process MSR values */
+#ifdef CONFIG_PPC_BOOK3S_64
+	vcpu->arch.shadow_msr |= MSR_ISF | MSR_HV;
+#endif
+}
+
 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 {
 	ulong old_msr = vcpu->arch.msr;
@@ -96,12 +118,10 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 #ifdef EXIT_DEBUG
 	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
 #endif
+	msr &= to_book3s(vcpu)->msr_mask;
 	vcpu->arch.msr = msr;
-	vcpu->arch.shadow_msr = msr | MSR_USER32;
-	vcpu->arch.shadow_msr &= ( MSR_VEC | MSR_VSX | MSR_FP | MSR_FE0 |
-				   MSR_USER64 | MSR_SE | MSR_BE | MSR_DE |
-				   MSR_FE1);
+	kvmppc_recalc_shadow_msr(vcpu);
 
 	if (msr & (MSR_WE|MSR_POW)) {
 		if (!vcpu->arch.pending_exceptions) {
@@ -125,11 +145,10 @@ void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
 	vcpu->arch.mmu.reset_msr(vcpu);
 }
 
-void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
+static int kvmppc_book3s_vec2irqprio(unsigned int vec)
 {
 	unsigned int prio;
 
-	vcpu->stat.queue_intr++;
 	switch (vec) {
 	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;		break;
 	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;	break;
@@ -149,15 +168,31 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
 	default:    prio = BOOK3S_IRQPRIO_MAX;			break;
 	}
 
-	set_bit(prio, &vcpu->arch.pending_exceptions);
+	return prio;
+}
+
+static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
+					  unsigned int vec)
+{
+	clear_bit(kvmppc_book3s_vec2irqprio(vec),
+		  &vcpu->arch.pending_exceptions);
+}
+
+void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
+{
+	vcpu->stat.queue_intr++;
+
+	set_bit(kvmppc_book3s_vec2irqprio(vec),
+		&vcpu->arch.pending_exceptions);
 #ifdef EXIT_DEBUG
 	printk(KERN_INFO "Queueing interrupt %x\n", vec);
 #endif
 }
 
-void kvmppc_core_queue_program(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
 {
+	to_book3s(vcpu)->prog_flags = flags;
 	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_PROGRAM);
 }
 
@@ -171,6 +206,11 @@ int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
 	return test_bit(BOOK3S_INTERRUPT_DECREMENTER >> 7, &vcpu->arch.pending_exceptions);
 }
 
+void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
+{
+	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
+}
+
 void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                 struct kvm_interrupt *irq)
 {
@@ -181,6 +221,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
 {
 	int deliver = 1;
 	int vec = 0;
+	ulong flags = 0ULL;
 
 	switch (priority) {
 	case BOOK3S_IRQPRIO_DECREMENTER:
@@ -214,6 +255,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
 		break;
 	case BOOK3S_IRQPRIO_PROGRAM:
 		vec = BOOK3S_INTERRUPT_PROGRAM;
+		flags = to_book3s(vcpu)->prog_flags;
 		break;
 	case BOOK3S_IRQPRIO_VSX:
 		vec = BOOK3S_INTERRUPT_VSX;
@@ -244,7 +286,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
 #endif
 
 	if (deliver)
-		kvmppc_inject_interrupt(vcpu, vec, 0ULL);
+		kvmppc_inject_interrupt(vcpu, vec, flags);
 
 	return deliver;
 }
@@ -254,21 +296,15 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
 	unsigned long *pending = &vcpu->arch.pending_exceptions;
 	unsigned int priority;
 
-	/* XXX be more clever here - no need to mftb() on every entry */
-	/* Issue DEC again if it's still active */
-#ifdef AGGRESSIVE_DEC
-	if (vcpu->arch.msr & MSR_EE)
-		if (kvmppc_get_dec(vcpu) & 0x80000000)
-			kvmppc_core_queue_dec(vcpu);
-#endif
-
 #ifdef EXIT_DEBUG
 	if (vcpu->arch.pending_exceptions)
 		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
 #endif
 	priority = __ffs(*pending);
 	while (priority <= (sizeof(unsigned int) * 8)) {
-		if (kvmppc_book3s_irqprio_deliver(vcpu, priority)) {
+		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
+		    (priority != BOOK3S_IRQPRIO_DECREMENTER)) {
+			/* DEC interrupts get cleared by mtdec */
 			clear_bit(priority, &vcpu->arch.pending_exceptions);
 			break;
 		}
@@ -503,14 +539,14 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		/* Page not found in guest PTE entries */
 		vcpu->arch.dear = vcpu->arch.fault_dear;
 		to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
-		vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
+		vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EPERM) {
 		/* Storage protection */
 		vcpu->arch.dear = vcpu->arch.fault_dear;
 		to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
 		to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
-		vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
+		vcpu->arch.msr |= (vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL);
 		kvmppc_book3s_queue_irqprio(vcpu, vec);
 	} else if (page_found == -EINVAL) {
 		/* Page not found in guest SLB */
@@ -532,13 +568,122 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		r = kvmppc_emulate_mmio(run, vcpu);
 		if ( r == RESUME_HOST_NV )
 			r = RESUME_HOST;
-		if ( r == RESUME_GUEST_NV )
-			r = RESUME_GUEST;
 	}
 
 	return r;
 }
 
+static inline int get_fpr_index(int i)
+{
+#ifdef CONFIG_VSX
+	i *= 2;
+#endif
+	return i;
+}
+
+/* Give up external provider (FPU, Altivec, VSX) */
+static void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
+{
+	struct thread_struct *t = &current->thread;
+	u64 *vcpu_fpr = vcpu->arch.fpr;
+	u64 *vcpu_vsx = vcpu->arch.vsr;
+	u64 *thread_fpr = (u64*)t->fpr;
+	int i;
+
+	if (!(vcpu->arch.guest_owned_ext & msr))
+		return;
+
+#ifdef DEBUG_EXT
+	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
+#endif
+
+	switch (msr) {
+	case MSR_FP:
+		giveup_fpu(current);
+		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
+			vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
+
+		vcpu->arch.fpscr = t->fpscr.val;
+		break;
+	case MSR_VEC:
+#ifdef CONFIG_ALTIVEC
+		giveup_altivec(current);
+		memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
+		vcpu->arch.vscr = t->vscr;
+#endif
+		break;
+	case MSR_VSX:
+#ifdef CONFIG_VSX
+		__giveup_vsx(current);
+		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
+			vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
+#endif
+		break;
+	default:
+		BUG();
+	}
+
+	vcpu->arch.guest_owned_ext &= ~msr;
+	current->thread.regs->msr &= ~msr;
+	kvmppc_recalc_shadow_msr(vcpu);
+}
+
+/* Handle external providers (FPU, Altivec, VSX) */
+static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
+			     ulong msr)
+{
+	struct thread_struct *t = &current->thread;
+	u64 *vcpu_fpr = vcpu->arch.fpr;
+	u64 *vcpu_vsx = vcpu->arch.vsr;
+	u64 *thread_fpr = (u64*)t->fpr;
+	int i;
+
+	if (!(vcpu->arch.msr & msr)) {
+		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+		return RESUME_GUEST;
+	}
+
+#ifdef DEBUG_EXT
+	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
+#endif
+
+	current->thread.regs->msr |= msr;
+
+	switch (msr) {
+	case MSR_FP:
+		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
+			thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
+
+		t->fpscr.val = vcpu->arch.fpscr;
+		t->fpexc_mode = 0;
+		kvmppc_load_up_fpu();
+		break;
+	case MSR_VEC:
+#ifdef CONFIG_ALTIVEC
+		memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
+		t->vscr = vcpu->arch.vscr;
+		t->vrsave = -1;
+		kvmppc_load_up_altivec();
+#endif
+		break;
+	case MSR_VSX:
+#ifdef CONFIG_VSX
+		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
+			thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
+		kvmppc_load_up_vsx();
+#endif
+		break;
+	default:
+		BUG();
+	}
+
+	vcpu->arch.guest_owned_ext |= msr;
+
+	kvmppc_recalc_shadow_msr(vcpu);
+
+	return RESUME_GUEST;
+}
+
 int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int exit_nr)
 {
@@ -563,7 +708,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case BOOK3S_INTERRUPT_INST_STORAGE:
 		vcpu->stat.pf_instruc++;
 		/* only care about PTEG not found errors, but leave NX alone */
-		if (vcpu->arch.shadow_msr & 0x40000000) {
+		if (vcpu->arch.shadow_srr1 & 0x40000000) {
 			r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.pc, exit_nr);
 			vcpu->stat.sp_instruc++;
 		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
@@ -575,7 +720,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			 */
 			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
 		} else {
-			vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x58000000);
+			vcpu->arch.msr |= vcpu->arch.shadow_srr1 & 0x58000000;
 			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
 			r = RESUME_GUEST;
@@ -621,6 +766,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	case BOOK3S_INTERRUPT_PROGRAM:
 	{
 		enum emulation_result er;
+		ulong flags;
+
+		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
 
 		if (vcpu->arch.msr & MSR_PR) {
 #ifdef EXIT_DEBUG
@@ -628,7 +776,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 #endif
 			if ((vcpu->arch.last_inst & 0xff0007ff) !=
 			    (INS_DCBZ & 0xfffffff7)) {
-				kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+				kvmppc_core_queue_program(vcpu, flags);
 				r = RESUME_GUEST;
 				break;
 			}
@@ -638,12 +786,12 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		er = kvmppc_emulate_instruction(run, vcpu);
 		switch (er) {
 		case EMULATE_DONE:
-			r = RESUME_GUEST;
+			r = RESUME_GUEST_NV;
 			break;
 		case EMULATE_FAIL:
 			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
 			       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
-			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+			kvmppc_core_queue_program(vcpu, flags);
 			r = RESUME_GUEST;
 			break;
 		default:
@@ -653,23 +801,30 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	}
 	case BOOK3S_INTERRUPT_SYSCALL:
 #ifdef EXIT_DEBUG
-		printk(KERN_INFO "Syscall Nr %d\n", (int)vcpu->arch.gpr[0]);
+		printk(KERN_INFO "Syscall Nr %d\n", (int)kvmppc_get_gpr(vcpu, 0));
 #endif
 		vcpu->stat.syscall_exits++;
 		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 		r = RESUME_GUEST;
 		break;
-	case BOOK3S_INTERRUPT_MACHINE_CHECK:
 	case BOOK3S_INTERRUPT_FP_UNAVAIL:
-	case BOOK3S_INTERRUPT_TRACE:
+		r = kvmppc_handle_ext(vcpu, exit_nr, MSR_FP);
+		break;
 	case BOOK3S_INTERRUPT_ALTIVEC:
+		r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VEC);
+		break;
 	case BOOK3S_INTERRUPT_VSX:
+		r = kvmppc_handle_ext(vcpu, exit_nr, MSR_VSX);
+		break;
+	case BOOK3S_INTERRUPT_MACHINE_CHECK:
+	case BOOK3S_INTERRUPT_TRACE:
 		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
 		r = RESUME_GUEST;
 		break;
 	default:
 		/* Ugh - bork here! What did we get? */
-		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", exit_nr, vcpu->arch.pc, vcpu->arch.shadow_msr);
+		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
+			exit_nr, vcpu->arch.pc, vcpu->arch.shadow_srr1);
 		r = RESUME_HOST;
 		BUG();
 		break;
@@ -712,10 +867,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	int i;
 
 	regs->pc = vcpu->arch.pc;
-	regs->cr = vcpu->arch.cr;
+	regs->cr = kvmppc_get_cr(vcpu);
 	regs->ctr = vcpu->arch.ctr;
 	regs->lr = vcpu->arch.lr;
-	regs->xer = vcpu->arch.xer;
+	regs->xer = kvmppc_get_xer(vcpu);
 	regs->msr = vcpu->arch.msr;
 	regs->srr0 = vcpu->arch.srr0;
 	regs->srr1 = vcpu->arch.srr1;
@@ -729,7 +884,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	regs->sprg7 = vcpu->arch.sprg6;
 
 	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
-		regs->gpr[i] = vcpu->arch.gpr[i];
+		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
 
 	return 0;
 }
@@ -739,10 +894,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	int i;
 
 	vcpu->arch.pc = regs->pc;
-	vcpu->arch.cr = regs->cr;
+	kvmppc_set_cr(vcpu, regs->cr);
 	vcpu->arch.ctr = regs->ctr;
 	vcpu->arch.lr = regs->lr;
-	vcpu->arch.xer = regs->xer;
+	kvmppc_set_xer(vcpu, regs->xer);
 	kvmppc_set_msr(vcpu, regs->msr);
 	vcpu->arch.srr0 = regs->srr0;
 	vcpu->arch.srr1 = regs->srr1;
@@ -754,8 +909,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	vcpu->arch.sprg6 = regs->sprg5;
 	vcpu->arch.sprg7 = regs->sprg6;
 
-	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
-		vcpu->arch.gpr[i] = regs->gpr[i];
+	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
+		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
 
 	return 0;
 }
@@ -850,7 +1005,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	int is_dirty = 0;
 	int r, n;
 
-	down_write(&kvm->slots_lock);
+	mutex_lock(&kvm->slots_lock);
 
 	r = kvm_get_dirty_log(kvm, log, &is_dirty);
 	if (r)
@@ -858,7 +1013,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	/* If nothing is dirty, don't bother messing with page tables. */
 	if (is_dirty) {
-		memslot = &kvm->memslots[log->slot];
+		memslot = &kvm->memslots->memslots[log->slot];
 
 		ga = memslot->base_gfn << PAGE_SHIFT;
 		ga_end = ga + (memslot->npages << PAGE_SHIFT);
@@ -872,7 +1027,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	r = 0;
 out:
-	up_write(&kvm->slots_lock);
+	mutex_unlock(&kvm->slots_lock);
 	return r;
 }
@@ -910,6 +1065,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
 	vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
 	vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
 	vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
+	vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
 
 	vcpu->arch.shadow_msr = MSR_USER64;
@@ -943,6 +1099,10 @@ extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int ret;
+	struct thread_struct ext_bkp;
+	bool save_vec = current->thread.used_vr;
+	bool save_vsx = current->thread.used_vsr;
+	ulong ext_msr;
 
 	/* No need to go into the guest when all we do is going out */
 	if (signal_pending(current)) {
@@ -950,6 +1110,35 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		return -EINTR;
 	}
 
+	/* Save FPU state in stack */
+	if (current->thread.regs->msr & MSR_FP)
+		giveup_fpu(current);
+	memcpy(ext_bkp.fpr, current->thread.fpr, sizeof(current->thread.fpr));
+	ext_bkp.fpscr = current->thread.fpscr;
+	ext_bkp.fpexc_mode = current->thread.fpexc_mode;
+
+#ifdef CONFIG_ALTIVEC
+	/* Save Altivec state in stack */
+	if (save_vec) {
+		if (current->thread.regs->msr & MSR_VEC)
+			giveup_altivec(current);
+		memcpy(ext_bkp.vr, current->thread.vr, sizeof(ext_bkp.vr));
+		ext_bkp.vscr = current->thread.vscr;
+		ext_bkp.vrsave = current->thread.vrsave;
+	}
+	ext_bkp.used_vr = current->thread.used_vr;
+#endif
+
+#ifdef CONFIG_VSX
+	/* Save VSX state in stack */
+	if (save_vsx && (current->thread.regs->msr & MSR_VSX))
+		__giveup_vsx(current);
+	ext_bkp.used_vsr = current->thread.used_vsr;
+#endif
+
+	/* Remember the MSR with disabled extensions */
+	ext_msr = current->thread.regs->msr;
+
 	/* XXX we get called with irq disabled - change that! */
 	local_irq_enable();
@@ -957,6 +1146,32 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
 	local_irq_disable();
 
+	current->thread.regs->msr = ext_msr;
+
+	/* Make sure we save the guest FPU/Altivec/VSX state */
+	kvmppc_giveup_ext(vcpu, MSR_FP);
+	kvmppc_giveup_ext(vcpu, MSR_VEC);
+	kvmppc_giveup_ext(vcpu, MSR_VSX);
+
+	/* Restore FPU state from stack */
+	memcpy(current->thread.fpr, ext_bkp.fpr, sizeof(ext_bkp.fpr));
+	current->thread.fpscr = ext_bkp.fpscr;
+	current->thread.fpexc_mode = ext_bkp.fpexc_mode;
+
+#ifdef CONFIG_ALTIVEC
+	/* Restore Altivec state from stack */
+	if (save_vec && current->thread.used_vr) {
+		memcpy(current->thread.vr, ext_bkp.vr, sizeof(ext_bkp.vr));
+		current->thread.vscr = ext_bkp.vscr;
+		current->thread.vrsave = ext_bkp.vrsave;
+	}
+	current->thread.used_vr = ext_bkp.used_vr;
+#endif
+
+#ifdef CONFIG_VSX
+	current->thread.used_vsr = ext_bkp.used_vsr;
+#endif
+
 	return ret;
 }
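
[Editor's note] kvmppc_recalc_shadow_msr(), introduced above, builds the MSR the guest actually executes under from three sources: bits the guest may control, bits the host always forces on, and the FP/VEC/VSX bits only while the guest currently owns the facility. Below is a minimal userspace sketch of that composition; the reduced bit set and helper name are assumptions for illustration, and the real function also ORs in 64-bit host bits such as MSR_HV.

/* Illustration only -- not part of the patch. */
#include <stdio.h>

#define MSR_EE  0x00008000UL	/* external interrupts: host forces on */
#define MSR_PR  0x00004000UL	/* problem state: host forces on       */
#define MSR_FP  0x00002000UL	/* FPU available                       */
#define MSR_SE  0x00000400UL	/* single step: guest controlled       */
#define MSR_BE  0x00000200UL	/* branch trace: guest controlled      */

static unsigned long recalc_shadow_msr(unsigned long guest_msr,
				       unsigned long guest_owned_ext)
{
	unsigned long shadow;

	/* Guest MSR values pass straight through */
	shadow = guest_msr & (MSR_SE | MSR_BE);
	/* Process MSR values the host always runs the guest with */
	shadow |= MSR_EE | MSR_PR;
	/* External providers, but only while the guest owns them */
	shadow |= guest_msr & guest_owned_ext;

	return shadow;
}

int main(void)
{
	/* Guest enabled FP and single-step and currently owns the FPU */
	printf("0x%05lx\n", recalc_shadow_msr(MSR_FP | MSR_SE, MSR_FP));
	/* Same guest MSR, but the host reclaimed the FPU: FP stays off */
	printf("0x%05lx\n", recalc_shadow_msr(MSR_FP | MSR_SE, 0));
	return 0;
}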
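
[Editor's note] The changes to __kvmppc_vcpu_run() bracket guest execution: the host's live FPU/Altivec/VSX state is flushed into an on-stack backup (ext_bkp) before entry, the guest's state is reclaimed through kvmppc_giveup_ext() after exit, and the host copy is then restored. Here is a condensed sketch of that save/run/restore bracket, with the kernel helpers reduced to stubs; all names in it are illustrative, not kernel API.

/* Illustration only -- not part of the patch. */
#include <stdio.h>
#include <string.h>

/* Stand-in for the FP/VEC/VSX parts of thread_struct */
struct ext_state {
	double fpr[32];
	unsigned int fpscr;
};

static struct ext_state host_live;	/* state the CPU currently holds */

/* Stub for giveup_fpu(): flush live registers into the memory image */
static void flush_live_regs(void)
{
	/* on real hardware this stores the register file to memory */
}

/* Stub for the guest run; via kvmppc_handle_ext() the guest may load
 * its own values into the live registers while it executes */
static void run_guest(void)
{
	host_live.fpr[0] = 1.0;	/* pretend the guest dirtied the FPU */
}

/* Stub for kvmppc_giveup_ext(): copy guest regs back into the vcpu */
static void reclaim_guest_ext(void)
{
	flush_live_regs();
}

static int vcpu_run(void)
{
	struct ext_state ext_bkp;	/* on-stack backup, as in the patch */

	flush_live_regs();		/* make the memory image current */
	memcpy(&ext_bkp, &host_live, sizeof(ext_bkp));

	run_guest();

	reclaim_guest_ext();		/* guest state -> vcpu struct */
	memcpy(&host_live, &ext_bkp, sizeof(ext_bkp));	/* host restored */

	printf("host fpr0 = %f\n", host_live.fpr[0]);	/* 0.0 again */
	return 0;
}

int main(void)
{
	return vcpu_run();
}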