Diffstat (limited to 'arch/mips/kvm')
-rw-r--r--  arch/mips/kvm/interrupt.h    |  4
-rw-r--r--  arch/mips/kvm/loongson_ipi.c |  2
-rw-r--r--  arch/mips/kvm/mips.c         | 17
-rw-r--r--  arch/mips/kvm/mmu.c          | 56
-rw-r--r--  arch/mips/kvm/vz.c           |  8
5 files changed, 25 insertions(+), 62 deletions(-)
diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h
index e529ea2bb34b..07bc0160bc94 100644
--- a/arch/mips/kvm/interrupt.h
+++ b/arch/mips/kvm/interrupt.h
@@ -37,3 +37,7 @@ u32 kvm_irq_to_priority(u32 irq);
int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause);
+
+#ifdef CONFIG_CPU_LOONGSON64
+extern void kvm_init_loongson_ipi(struct kvm *kvm);
+#endif
diff --git a/arch/mips/kvm/loongson_ipi.c b/arch/mips/kvm/loongson_ipi.c
index 5d53f32d837c..6ac83a31148c 100644
--- a/arch/mips/kvm/loongson_ipi.c
+++ b/arch/mips/kvm/loongson_ipi.c
@@ -10,6 +10,8 @@
#include <linux/kvm_host.h>
+#include "interrupt.h"
+
#define IPI_BASE 0x3ff01000ULL
#define CORE0_STATUS_OFF 0x000
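Note on the two hunks above: the kvm_init_loongson_ipi() prototype moves out of mips.c (its removal appears in the mips.c hunk below) into interrupt.h under CONFIG_CPU_LOONGSON64, and loongson_ipi.c now includes that header so the function definition is compiled against a visible prototype. A minimal sketch of the call-site shape in kvm_arch_init_vm(), assuming the caller's guard mirrors the header; the actual call site is untouched by this diff:

#include "interrupt.h"

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	/* ... type check and GPA page-table allocation ... */
#ifdef CONFIG_CPU_LOONGSON64
	/* prototype now comes from interrupt.h, not a local extern */
	kvm_init_loongson_ipi(kvm);
#endif
	return 0;
}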
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 231ac052b506..cef3c423a41a 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -125,18 +125,16 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
return 1;
}
-int kvm_arch_hardware_enable(void)
+int kvm_arch_enable_virtualization_cpu(void)
{
- return kvm_mips_callbacks->hardware_enable();
+ return kvm_mips_callbacks->enable_virtualization_cpu();
}
-void kvm_arch_hardware_disable(void)
+void kvm_arch_disable_virtualization_cpu(void)
{
- kvm_mips_callbacks->hardware_disable();
+ kvm_mips_callbacks->disable_virtualization_cpu();
}
-extern void kvm_init_loongson_ipi(struct kvm *kvm);
-
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
switch (type) {
@@ -290,9 +288,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
if (err)
return err;
- hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
+ hrtimer_setup(&vcpu->arch.comparecount_timer, kvm_mips_comparecount_wakeup, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
/*
* Allocate space for host mode exception handlers that handle
@@ -436,7 +433,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
vcpu->mmio_needed = 0;
}
- if (vcpu->run->immediate_exit)
+ if (!vcpu->wants_to_run)
goto out;
lose_fpu(1);
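The mips.c hunks carry three independent updates: the hardware_enable/disable hooks are renamed to KVM core's enable/disable_virtualization_cpu naming, the comparecount timer switches from the two-step hrtimer_init() plus manual .function assignment to a single hrtimer_setup() call, and the run-loop gate flips from run->immediate_exit to the inverted vcpu->wants_to_run flag (hence the added negation). A self-contained sketch of the hrtimer_setup() pattern, using a hypothetical demo_wakeup() callback:

#include <linux/hrtimer.h>

static enum hrtimer_restart demo_wakeup(struct hrtimer *timer)
{
	/* hypothetical callback: fire once, do not rearm */
	return HRTIMER_NORESTART;
}

static struct hrtimer demo_timer;

static void demo_timer_init(void)
{
	/*
	 * Old two-step pattern (as removed above):
	 *   hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	 *   demo_timer.function = demo_wakeup;
	 * hrtimer_setup() folds both steps into one call, so an
	 * initialized timer always carries a valid callback.
	 */
	hrtimer_setup(&demo_timer, demo_wakeup, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
}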
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 467ee6b95ae1..d2c3b6b41f18 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -444,36 +444,6 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
return true;
}
-bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
-{
- gpa_t gpa = range->start << PAGE_SHIFT;
- pte_t hva_pte = range->arg.pte;
- pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
- pte_t old_pte;
-
- if (!gpa_pte)
- return false;
-
- /* Mapping may need adjusting depending on memslot flags */
- old_pte = *gpa_pte;
- if (range->slot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
- hva_pte = pte_mkclean(hva_pte);
- else if (range->slot->flags & KVM_MEM_READONLY)
- hva_pte = pte_wrprotect(hva_pte);
-
- set_pte(gpa_pte, hva_pte);
-
- /* Replacing an absent or old page doesn't need flushes */
- if (!pte_present(old_pte) || !pte_young(old_pte))
- return false;
-
- /* Pages swapped, aged, moved, or cleaned require flushes */
- return !pte_present(hva_pte) ||
- !pte_young(hva_pte) ||
- pte_pfn(old_pte) != pte_pfn(hva_pte) ||
- (pte_dirty(old_pte) && !pte_dirty(hva_pte));
-}
-
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
return kvm_mips_mkold_gpa_pt(kvm, range->start, range->end);
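The deleted kvm_set_spte_gfn() was the MIPS handler for KVM's change_pte MMU-notifier path; with that path dropped from KVM core, the architecture keeps only the remaining gfn-range hooks. A sketch of the surviving set, assuming the generic prototypes in kvm_host.h are unchanged:

/* gfn-range hooks MIPS still implements after this hunk */
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);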
@@ -514,8 +484,6 @@ static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
struct kvm *kvm = vcpu->kvm;
gfn_t gfn = gpa >> PAGE_SHIFT;
pte_t *ptep;
- kvm_pfn_t pfn = 0; /* silence bogus GCC warning */
- bool pfn_valid = false;
int ret = 0;
spin_lock(&kvm->mmu_lock);
@@ -528,12 +496,9 @@ static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
}
/* Track access to pages marked old */
- if (!pte_young(*ptep)) {
+ if (!pte_young(*ptep))
set_pte(ptep, pte_mkyoung(*ptep));
- pfn = pte_pfn(*ptep);
- pfn_valid = true;
- /* call kvm_set_pfn_accessed() after unlock */
- }
+
if (write_fault && !pte_dirty(*ptep)) {
if (!pte_write(*ptep)) {
ret = -EFAULT;
@@ -542,9 +507,7 @@ static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
/* Track dirtying of writeable pages */
set_pte(ptep, pte_mkdirty(*ptep));
- pfn = pte_pfn(*ptep);
mark_page_dirty(kvm, gfn);
- kvm_set_pfn_dirty(pfn);
}
if (out_entry)
@@ -554,8 +517,6 @@ static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
out:
spin_unlock(&kvm->mmu_lock);
- if (pfn_valid)
- kvm_set_pfn_accessed(pfn);
return ret;
}
@@ -596,6 +557,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
bool writeable;
unsigned long prot_bits;
unsigned long mmu_seq;
+ struct page *page;
/* Try the fast path to handle old / clean pages */
srcu_idx = srcu_read_lock(&kvm->srcu);
@@ -617,7 +579,7 @@ retry:
mmu_seq = kvm->mmu_invalidate_seq;
/*
* Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads
- * in gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
+ * in kvm_faultin_pfn() (which calls get_user_pages()), so that we don't
* risk the page we get a reference to getting unmapped before we have a
* chance to grab the mmu_lock without mmu_invalidate_retry() noticing.
*
@@ -629,7 +591,7 @@ retry:
smp_rmb();
/* Slow path - ask KVM core whether we can access this GPA */
- pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable);
+ pfn = kvm_faultin_pfn(vcpu, gfn, write_fault, &writeable, &page);
if (is_error_noslot_pfn(pfn)) {
err = -EFAULT;
goto out;
@@ -641,10 +603,10 @@ retry:
/*
* This can happen when mappings are changed asynchronously, but
* also synchronously if a COW is triggered by
- * gfn_to_pfn_prot().
+ * kvm_faultin_pfn().
*/
spin_unlock(&kvm->mmu_lock);
- kvm_release_pfn_clean(pfn);
+ kvm_release_page_unused(page);
goto retry;
}
@@ -658,7 +620,6 @@ retry:
if (write_fault) {
prot_bits |= __WRITEABLE;
mark_page_dirty(kvm, gfn);
- kvm_set_pfn_dirty(pfn);
}
}
entry = pfn_pte(pfn, __pgprot(prot_bits));
@@ -672,9 +633,8 @@ retry:
if (out_buddy)
*out_buddy = *ptep_buddy(ptep);
+ kvm_release_faultin_page(kvm, page, false, writeable);
spin_unlock(&kvm->mmu_lock);
- kvm_release_pfn_clean(pfn);
- kvm_set_pfn_accessed(pfn);
out:
srcu_read_unlock(&kvm->srcu, srcu_idx);
return err;
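Taken together, the mmu.c hunks replace gfn_to_pfn_prot() plus manual kvm_release_pfn_clean()/kvm_set_pfn_accessed()/kvm_set_pfn_dirty() bookkeeping with the kvm_faultin_pfn()/kvm_release_faultin_page() pairing, which hands back the underlying struct page and folds accessed/dirty marking into the release; the fast path likewise loses its deferred kvm_set_pfn_accessed() call. A condensed sketch of the resulting slow-path shape, using a hypothetical fault_in_gpa() helper and eliding the actual pte construction:

static int fault_in_gpa(struct kvm_vcpu *vcpu, gfn_t gfn, bool write_fault)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long mmu_seq;
	struct page *page;
	bool writeable;
	kvm_pfn_t pfn;

	mmu_seq = kvm->mmu_invalidate_seq;
	smp_rmb();	/* order the seq read before faultin's PTE reads */

	/* resolve gfn -> pfn and take a reference on the backing page */
	pfn = kvm_faultin_pfn(vcpu, gfn, write_fault, &writeable, &page);
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	spin_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry(kvm, mmu_seq)) {
		/* raced with an invalidation: drop the page untouched */
		spin_unlock(&kvm->mmu_lock);
		kvm_release_page_unused(page);
		return -EAGAIN;	/* caller would retry */
	}
	/* ... build prot bits and set_pte() the GPA pte for pfn ... */
	/* release marks the page accessed, and dirty when writeable */
	kvm_release_faultin_page(kvm, page, false, writeable);
	spin_unlock(&kvm->mmu_lock);
	return 0;
}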
diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
index 99d5a71e4300..ccab4d76b126 100644
--- a/arch/mips/kvm/vz.c
+++ b/arch/mips/kvm/vz.c
@@ -2869,7 +2869,7 @@ static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size)
return ret + 1;
}
-static int kvm_vz_hardware_enable(void)
+static int kvm_vz_enable_virtualization_cpu(void)
{
unsigned int mmu_size, guest_mmu_size, ftlb_size;
u64 guest_cvmctl, cvmvmconfig;
@@ -2983,7 +2983,7 @@ static int kvm_vz_hardware_enable(void)
return 0;
}
-static void kvm_vz_hardware_disable(void)
+static void kvm_vz_disable_virtualization_cpu(void)
{
u64 cvmvmconfig;
unsigned int mmu_size;
@@ -3280,8 +3280,8 @@ static struct kvm_mips_callbacks kvm_vz_callbacks = {
.handle_msa_disabled = kvm_trap_vz_handle_msa_disabled,
.handle_guest_exit = kvm_trap_vz_handle_guest_exit,
- .hardware_enable = kvm_vz_hardware_enable,
- .hardware_disable = kvm_vz_hardware_disable,
+ .enable_virtualization_cpu = kvm_vz_enable_virtualization_cpu,
+ .disable_virtualization_cpu = kvm_vz_disable_virtualization_cpu,
.check_extension = kvm_vz_check_extension,
.vcpu_init = kvm_vz_vcpu_init,
.vcpu_uninit = kvm_vz_vcpu_uninit,
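The vz.c hunks are the backend half of the rename seen in mips.c: the VZ implementations and the kvm_mips_callbacks fields both adopt the enable/disable_virtualization_cpu naming, leaving the dispatch chain intact. As a recap of what this diff wires up (assuming the callback struct fields in the MIPS kvm_host.h are renamed to match):

/* KVM core -> arch wrapper (mips.c) -> VZ backend (vz.c) */
int kvm_arch_enable_virtualization_cpu(void)
{
	return kvm_mips_callbacks->enable_virtualization_cpu();
}

static struct kvm_mips_callbacks kvm_vz_callbacks = {
	/* ... */
	.enable_virtualization_cpu  = kvm_vz_enable_virtualization_cpu,
	.disable_virtualization_cpu = kvm_vz_disable_virtualization_cpu,
};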