author    Janosch Frank <frankja@linux.ibm.com>  2018-07-13 11:28:37 +0100
committer Janosch Frank <frankja@linux.ibm.com>  2018-07-30 23:13:38 +0200
commit  a9e00d8349c98e0973c8b0d671d69e838f7b5bcc (patch)
tree    b6c9a4374f72c0f6ccea510dc8fc1f8c74d43d2d /arch/s390/mm
parent  7d735b9ae82d073e23ff7d2648e0aa8056049d16 (diff)
s390/mm: Add huge page gmap linking support
Let's allow huge pmd linking when enabled through the KVM_CAP_S390_HPAGE_1M capability. We can also restrict gmap invalidation and notification to the cases where the capability has been activated, and save some cycles when that's not the case.

Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
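As a companion illustration (not part of this patch), here is a minimal userspace sketch of how a VMM would opt into the capability that gates the new behaviour. The helper name enable_hpage_1m and the vm_fd parameter are hypothetical; the sketch assumes the standard KVM_CHECK_EXTENSION / KVM_ENABLE_CAP VM ioctls and that the capability is enabled before memory slots are set up:

```c
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: opt a VM into 1M huge page backing for the guest. */
static int enable_hpage_1m(int vm_fd)
{
	struct kvm_enable_cap cap;

	/* Only attempt to enable the capability if KVM reports it. */
	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_HPAGE_1M) <= 0)
		return -1;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_S390_HPAGE_1M;

	/*
	 * In the enabling series, handling this VM capability is what sets
	 * mm->context.allow_gmap_hpage_1m, which the hunks below test.
	 */
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}
```

Once the flag is set on the guest's mm, __gmap_link() accepts large pmds and the pmdp_idte helpers start forwarding invalidations to the gmap code, as shown in the diff below.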
Diffstat (limited to 'arch/s390/mm')
-rw-r--r--  arch/s390/mm/gmap.c     | 9
-rw-r--r--  arch/s390/mm/pgtable.c  | 8
2 files changed, 10 insertions(+), 7 deletions(-)
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 736ed32a83c5..bb44990c8212 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2,8 +2,10 @@
/*
* KVM guest address space mapping code
*
- * Copyright IBM Corp. 2007, 2016
+ * Copyright IBM Corp. 2007, 2016, 2018
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * David Hildenbrand <david@redhat.com>
+ * Janosch Frank <frankja@linux.vnet.ibm.com>
*/
#include <linux/kernel.h>
@@ -588,8 +590,8 @@ int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
return -EFAULT;
pmd = pmd_offset(pud, vmaddr);
VM_BUG_ON(pmd_none(*pmd));
- /* large pmds cannot yet be handled */
- if (pmd_large(*pmd))
+ /* Are we allowed to use huge pages? */
+ if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
return -EFAULT;
/* Link gmap segment table entry location to page table. */
rc = radix_tree_preload(GFP_KERNEL);
@@ -1632,6 +1634,7 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
unsigned long limit;
int rc;
+ BUG_ON(parent->mm->context.allow_gmap_hpage_1m);
BUG_ON(gmap_is_shadow(parent));
spin_lock(&parent->shadow_lock);
sg = gmap_find_shadow(parent, asce, edat_level);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 37d68706f5aa..f2cc7da473e4 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -347,7 +347,7 @@ static inline void pmdp_idte_local(struct mm_struct *mm,
mm->context.asce, IDTE_LOCAL);
else
__pmdp_idte(addr, pmdp, 0, 0, IDTE_LOCAL);
- if (mm_has_pgste(mm))
+ if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
gmap_pmdp_idte_local(mm, addr);
}
@@ -357,15 +357,15 @@ static inline void pmdp_idte_global(struct mm_struct *mm,
if (MACHINE_HAS_TLB_GUEST) {
__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
mm->context.asce, IDTE_GLOBAL);
- if (mm_has_pgste(mm))
+ if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
gmap_pmdp_idte_global(mm, addr);
} else if (MACHINE_HAS_IDTE) {
__pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
- if (mm_has_pgste(mm))
+ if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
gmap_pmdp_idte_global(mm, addr);
} else {
__pmdp_csp(pmdp);
- if (mm_has_pgste(mm))
+ if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
gmap_pmdp_csp(mm, addr);
}
}