path: root/arch/arm64
author    Linus Torvalds <torvalds@linux-foundation.org>  2014-10-18 14:32:31 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-10-18 14:32:31 -0700
commit    8a5de18239e418fe7b1f36504834689f754d8ccc (patch)
tree      8d05ae77da1d4a8512b6052e2ba23571543666c7 /arch/arm64
parent    857b50f5d0eed113428c864e927289d8f5f2b864 (diff)
parent    2df36a5dd6792870bef48f63bfca42055ea5b79c (diff)
Merge tag 'kvm-arm-for-3.18-take-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm
Pull second batch of changes for KVM/{arm,arm64} from Marc Zyngier:
 "The most obvious thing is the sizeable MMU changes to support 48bit
  VAs on arm64.

  Summary:
   - support for 48bit IPA and VA (EL2)
   - a number of fixes for devices mapped into guests
   - yet another VGIC fix for BE
   - a fix for CPU hotplug
   - a few compile fixes (disabled VGIC, strict mm checks)"

[ I'm pulling directly from Marc at the request of Paolo Bonzini, whose
  backpack was stolen at Düsseldorf airport and who will do new keys and
  rebuild his web of trust.  - Linus ]

* tag 'kvm-arm-for-3.18-take-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm:
  arm/arm64: KVM: Fix BE accesses to GICv2 EISR and ELRSR regs
  arm: kvm: STRICT_MM_TYPECHECKS fix for user_mem_abort
  arm/arm64: KVM: Ensure memslots are within KVM_PHYS_SIZE
  arm64: KVM: Implement 48 VA support for KVM EL2 and Stage-2
  arm/arm64: KVM: map MMIO regions at creation time
  arm64: kvm: define PAGE_S2_DEVICE as read-only by default
  ARM: kvm: define PAGE_S2_DEVICE as read-only by default
  arm/arm64: KVM: add 'writable' parameter to kvm_phys_addr_ioremap
  arm/arm64: KVM: fix potential NULL dereference in user_mem_abort()
  arm/arm64: KVM: use __GFP_ZERO not memset() to get zeroed pages
  ARM: KVM: fix vgic-disabled build
  arm: kvm: fix CPU hotplug
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h  127
-rw-r--r--  arch/arm64/include/asm/pgtable.h    2
-rw-r--r--  arch/arm64/kvm/vgic-v2-switch.S    12
3 files changed, 126 insertions(+), 15 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index a030d163840b..0caf7a59f6a1 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -41,6 +41,18 @@
*/
#define TRAMPOLINE_VA (HYP_PAGE_OFFSET_MASK & PAGE_MASK)
+/*
+ * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
+ * levels in addition to the PGD and potentially the PUD which are
+ * pre-allocated (we pre-allocate the fake PGD and the PUD when the Stage-2
+ * tables use one level fewer than the kernel).
+ */
+#ifdef CONFIG_ARM64_64K_PAGES
+#define KVM_MMU_CACHE_MIN_PAGES 1
+#else
+#define KVM_MMU_CACHE_MIN_PAGES 2
+#endif
+
#ifdef __ASSEMBLY__
/*
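The new constant sizes the per-fault page cache: before KVM walks the Stage-2 tables to install a mapping, it reserves enough pages that the walk can never fail part-way, one page per table level below the pre-allocated PGD/PUD. A minimal sketch of that pattern, loosely following the cache-topping helper in the shared arm mmu code (not an exact quote of a call site):

	/* Reserve KVM_MMU_CACHE_MIN_PAGES pages up front so the Stage-2
	 * walk in the fault path never has to allocate under a lock. */
	ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
				     KVM_NR_MEM_OBJS);
	if (ret)
		return ret;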
@@ -53,6 +65,7 @@
#else
+#include <asm/pgalloc.h>
#include <asm/cachetype.h>
#include <asm/cacheflush.h>
@@ -65,10 +78,6 @@
#define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL)
-/* Make sure we get the right size, and thus the right alignment */
-#define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - PGDIR_SHIFT))
-#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
-
int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
@@ -77,7 +86,7 @@ void free_hyp_pgds(void);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
- phys_addr_t pa, unsigned long size);
+ phys_addr_t pa, unsigned long size, bool writable);
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
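With the extra parameter, callers now state explicitly whether the guest may write to a device mapping. A sketch of a call site, loosely based on the VGIC mapping of the GIC virtual CPU interface (argument values are illustrative):

	/* Map the GIC virtual CPU interface into the guest read/write;
	 * other device regions can stay read-only by default. */
	ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
				    vcpu_res.start, resource_size(&vcpu_res),
				    true /* writable */);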
@@ -93,6 +102,7 @@ void kvm_clear_hyp_idmap(void);
#define kvm_set_pmd(pmdp, pmd) set_pmd(pmdp, pmd)
static inline void kvm_clean_pgd(pgd_t *pgd) {}
+static inline void kvm_clean_pmd(pmd_t *pmd) {}
static inline void kvm_clean_pmd_entry(pmd_t *pmd) {}
static inline void kvm_clean_pte(pte_t *pte) {}
static inline void kvm_clean_pte_entry(pte_t *pte) {}
@@ -111,19 +121,116 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
#define kvm_pud_addr_end(addr, end) pud_addr_end(addr, end)
#define kvm_pmd_addr_end(addr, end) pmd_addr_end(addr, end)
+/*
+ * In the case where PGDIR_SHIFT is larger than KVM_PHYS_SHIFT, we can address
+ * the entire IPA input range with a single pgd entry. Note that in this
+ * case, the pgd is actually not used by
+ * the MMU for Stage-2 translations, but is merely a fake pgd used as a data
+ * structure for the kernel pgtable macros to work.
+ */
+#if PGDIR_SHIFT > KVM_PHYS_SHIFT
+#define PTRS_PER_S2_PGD_SHIFT 0
+#else
+#define PTRS_PER_S2_PGD_SHIFT (KVM_PHYS_SHIFT - PGDIR_SHIFT)
+#endif
+#define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT)
+#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
+
+/*
+ * If we are concatenating first level stage-2 page tables, we would have at
+ * most 16 pointers in the fake PGD, because that's what the architecture
+ * allows. In this case, (4 - CONFIG_ARM64_PGTABLE_LEVELS) represents the
+ * first level for the host, and we add 1 to go to the next level (which
+ * uses concatenation) for the stage-2 tables.
+ */
+#if PTRS_PER_S2_PGD <= 16
+#define KVM_PREALLOC_LEVEL (4 - CONFIG_ARM64_PGTABLE_LEVELS + 1)
+#else
+#define KVM_PREALLOC_LEVEL (0)
+#endif
+
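To make the cases above concrete, here are worked values for three common configurations, assuming KVM_PHYS_SHIFT == 40 (the IPA size KVM uses here):

	4K pages,  4 kernel levels: PGDIR_SHIFT = 39, so PTRS_PER_S2_PGD = 2
	    (<= 16)  ->  KVM_PREALLOC_LEVEL = 1  (fake PGD, pre-allocated PUDs)
	64K pages, 3 kernel levels: PGDIR_SHIFT = 42 > 40, so PTRS_PER_S2_PGD = 1
	    (<= 16)  ->  KVM_PREALLOC_LEVEL = 2  (fake PGD and PUD, pre-allocated PMD)
	4K pages,  3 kernel levels: PGDIR_SHIFT = 30, so PTRS_PER_S2_PGD = 1024
	    (> 16)   ->  KVM_PREALLOC_LEVEL = 0  (the real PGD is walked by the MMU)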
+/**
+ * kvm_prealloc_hwpgd - allocate initial table for VTTBR
+ * @kvm: The KVM struct pointer for the VM.
+ * @pgd: The kernel pseudo pgd
+ *
+ * When the kernel uses more levels of page tables than the guest, we allocate
+ * a fake PGD and pre-populate it to point to the next-level page table, which
+ * will be the real initial page table pointed to by the VTTBR.
+ *
+ * When KVM_PREALLOC_LEVEL==2, we allocate a single page for the PMD and
+ * the kernel uses a folded PUD. When KVM_PREALLOC_LEVEL==1, we
+ * allocate 2 consecutive PUD pages.
+ */
+static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
+{
+ unsigned int i;
+ unsigned long hwpgd;
+
+ if (KVM_PREALLOC_LEVEL == 0)
+ return 0;
+
+ hwpgd = __get_free_pages(GFP_KERNEL | __GFP_ZERO, PTRS_PER_S2_PGD_SHIFT);
+ if (!hwpgd)
+ return -ENOMEM;
+
+ for (i = 0; i < PTRS_PER_S2_PGD; i++) {
+ if (KVM_PREALLOC_LEVEL == 1)
+ pgd_populate(NULL, pgd + i,
+ (pud_t *)hwpgd + i * PTRS_PER_PUD);
+ else if (KVM_PREALLOC_LEVEL == 2)
+ pud_populate(NULL, pud_offset(pgd, 0) + i,
+ (pmd_t *)hwpgd + i * PTRS_PER_PMD);
+ }
+
+ return 0;
+}
+
+static inline void *kvm_get_hwpgd(struct kvm *kvm)
+{
+ pgd_t *pgd = kvm->arch.pgd;
+ pud_t *pud;
+
+ if (KVM_PREALLOC_LEVEL == 0)
+ return pgd;
+
+ pud = pud_offset(pgd, 0);
+ if (KVM_PREALLOC_LEVEL == 1)
+ return pud;
+
+ BUG_ON(KVM_PREALLOC_LEVEL != 2);
+ return pmd_offset(pud, 0);
+}
+
+static inline void kvm_free_hwpgd(struct kvm *kvm)
+{
+ if (KVM_PREALLOC_LEVEL > 0) {
+ unsigned long hwpgd = (unsigned long)kvm_get_hwpgd(kvm);
+ free_pages(hwpgd, PTRS_PER_S2_PGD_SHIFT);
+ }
+}
+
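Taken together, these three helpers bracket the lifetime of the hardware-visible table. A condensed sketch of the expected call sites (loosely following the common arm/arm64 stage-2 setup and teardown paths, with error handling trimmed):

	/* VM creation: allocate the (possibly fake) PGD, then the real table. */
	pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER);
	if (!pgd)
		return -ENOMEM;
	ret = kvm_prealloc_hwpgd(kvm, pgd);

	/* VTTBR must point at what the MMU walks, not at the fake PGD. */
	pgd_phys = virt_to_phys(kvm_get_hwpgd(kvm));

	/* VM teardown: release the hardware table, then the fake PGD. */
	kvm_free_hwpgd(kvm);
	free_pages((unsigned long)pgd, S2_PGD_ORDER);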
static inline bool kvm_page_empty(void *ptr)
{
struct page *ptr_page = virt_to_page(ptr);
return page_count(ptr_page) == 1;
}
-#define kvm_pte_table_empty(ptep) kvm_page_empty(ptep)
-#ifndef CONFIG_ARM64_64K_PAGES
-#define kvm_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
+#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
+
+#ifdef __PAGETABLE_PMD_FOLDED
+#define kvm_pmd_table_empty(kvm, pmdp) (0)
+#else
+#define kvm_pmd_table_empty(kvm, pmdp) \
+ (kvm_page_empty(pmdp) && (!(kvm) || KVM_PREALLOC_LEVEL < 2))
+#endif
+
+#ifdef __PAGETABLE_PUD_FOLDED
+#define kvm_pud_table_empty(kvm, pudp) (0)
#else
-#define kvm_pmd_table_empty(pmdp) (0)
+#define kvm_pud_table_empty(kvm, pudp) \
+ (kvm_page_empty(pudp) && (!(kvm) || KVM_PREALLOC_LEVEL < 1))
#endif
-#define kvm_pud_table_empty(pudp) (0)
struct kvm;
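The macros now take the VM so the unmap path can tell pre-allocated tables from dynamically allocated ones: with KVM_PREALLOC_LEVEL == 2 an empty PMD table must never be freed, with KVM_PREALLOC_LEVEL >= 1 the same holds for PUD tables, and a NULL kvm (as used for the Hyp tables) keeps the old always-freeable behaviour. A sketch of the intended pattern; free_stage2_table() is a hypothetical stand-in for the real clearing helpers:

	if (kvm_pmd_table_empty(kvm, start_pmd))
		free_stage2_table(start_pmd);	/* skipped when the PMD table was pre-allocated */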
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index cefd3e825612..41a43bf26492 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -79,7 +79,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
#define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
-#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN)
+#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
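With PTE_S2_RDWR dropped from the default, writability becomes an explicit decision at mapping time. A minimal sketch of that pattern, using the existing kvm_set_s2pte_writable() helper from kvm_mmu.h:

	pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);	/* read-only by default */
	if (writable)
		kvm_set_s2pte_writable(&pte);		/* opt in per mapping */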
diff --git a/arch/arm64/kvm/vgic-v2-switch.S b/arch/arm64/kvm/vgic-v2-switch.S
index ae211772f991..f002fe1c3700 100644
--- a/arch/arm64/kvm/vgic-v2-switch.S
+++ b/arch/arm64/kvm/vgic-v2-switch.S
@@ -67,10 +67,14 @@ CPU_BE( rev w11, w11 )
str w4, [x3, #VGIC_V2_CPU_HCR]
str w5, [x3, #VGIC_V2_CPU_VMCR]
str w6, [x3, #VGIC_V2_CPU_MISR]
- str w7, [x3, #VGIC_V2_CPU_EISR]
- str w8, [x3, #(VGIC_V2_CPU_EISR + 4)]
- str w9, [x3, #VGIC_V2_CPU_ELRSR]
- str w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)]
+CPU_LE( str w7, [x3, #VGIC_V2_CPU_EISR] )
+CPU_LE( str w8, [x3, #(VGIC_V2_CPU_EISR + 4)] )
+CPU_LE( str w9, [x3, #VGIC_V2_CPU_ELRSR] )
+CPU_LE( str w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)] )
+CPU_BE( str w7, [x3, #(VGIC_V2_CPU_EISR + 4)] )
+CPU_BE( str w8, [x3, #VGIC_V2_CPU_EISR] )
+CPU_BE( str w9, [x3, #(VGIC_V2_CPU_ELRSR + 4)] )
+CPU_BE( str w10, [x3, #VGIC_V2_CPU_ELRSR] )
str w11, [x3, #VGIC_V2_CPU_APR]
/* Clear GICH_HCR */
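The swapped +0/+4 offsets in the CPU_BE() stores follow from how the C side consumes the save area: the two 32-bit GICH_EISR/GICH_ELRSR halves are read back as a single 64-bit value, and on a big-endian kernel the low 32 bits of a 64-bit value sit at the higher address. A self-contained C illustration of that layout rule (not kernel code):

	#include <inttypes.h>
	#include <stdio.h>

	int main(void)
	{
		union { uint64_t v; uint32_t w[2]; } u = { .v = 0 };

		u.w[0] = 0x11111111;	/* the 32-bit word at the LOWER address */
		/* little-endian: v == 0x0000000011111111 (w[0] is the low half)
		 * big-endian:    v == 0x1111111100000000 (w[0] is the high half)
		 * so on BE the EISR0/ELRSR0 words belong at offset +4. */
		printf("%016" PRIx64 "\n", u.v);
		return 0;
	}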