author	Dan Williams <dan.j.williams@intel.com>	2017-10-23 07:20:00 -0700
committer	Dan Williams <dan.j.williams@intel.com>	2018-01-19 16:50:53 -0800
commit	785a3fab4adbf91b2189c928a59ae219c54ba95e (patch)
tree	7a3f3342c336eb99cbc7ddbe09741af78d7e99b2 /mm/memory.c
parent	77dd66a3c67c93ab401ccc15efff25578be281fd (diff)
mm, dax: introduce pfn_t_special()
In support of removing the VM_MIXEDMAP indication from DAX VMAs,
introduce pfn_t_special() for drivers to indicate that _PAGE_SPECIAL
should be used for DAX ptes. This also helps identify drivers like
dcssblk that only want to use DAX in a read-only fashion without
get_user_pages() support.

Ideally we could delete axonram and dcssblk DAX support, but if we need
to keep it, better to make it explicit that axonram and dcssblk only
support a subset of DAX due to missing _PAGE_DEVMAP support.

Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
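The pfn_t_special() helper itself is added to include/linux/pfn_t.h, which is outside the mm/memory.c hunk shown on this page. A minimal sketch of a flag-based check follows; the exact flag bit position and the __HAVE_ARCH_PTE_SPECIAL guard are assumptions, not taken from this page:

/*
 * Sketch only: the real definition lives in include/linux/pfn_t.h and is
 * not part of the mm/memory.c diff below.  Bit position and config guard
 * here are assumptions.
 */
#define PFN_SPECIAL	(1ULL << (BITS_PER_LONG_LONG - 5))

static inline bool pfn_t_special(pfn_t pfn)
{
#ifdef __HAVE_ARCH_PTE_SPECIAL
	/* Only meaningful if the architecture implements pte_special(). */
	return (pfn.val & PFN_SPECIAL) == PFN_SPECIAL;
#else
	return false;
#endif
}

A driver that sets PFN_SPECIAL on its pfn_t values is signalling that the resulting ptes should be pte_special() mappings rather than _PAGE_DEVMAP mappings.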
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	16
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/mm/memory.c b/mm/memory.c
index ca5674cbaff2..46b6c33b7f04 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1897,12 +1897,26 @@ int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
 }
 EXPORT_SYMBOL(vm_insert_pfn_prot);
 
+static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
+{
+	/* these checks mirror the abort conditions in vm_normal_page */
+	if (vma->vm_flags & VM_MIXEDMAP)
+		return true;
+	if (pfn_t_devmap(pfn))
+		return true;
+	if (pfn_t_special(pfn))
+		return true;
+	if (is_zero_pfn(pfn_t_to_pfn(pfn)))
+		return true;
+	return false;
+}
+
 static int __vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 			pfn_t pfn, bool mkwrite)
 {
 	pgprot_t pgprot = vma->vm_page_prot;
 
-	BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
+	BUG_ON(!vm_mixed_ok(vma, pfn));
 
 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return -EFAULT;
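With vm_mixed_ok() in place, a driver that lacks _PAGE_DEVMAP support (dcssblk, axonram) can insert mappings without setting VM_MIXEDMAP on the VMA by tagging the pfn as special. The sketch below illustrates that call pattern; example_map_devpage is a hypothetical helper, not code from this commit:

/* Hypothetical driver-side usage, assuming <linux/mm.h> and <linux/pfn_t.h>. */
#include <linux/mm.h>
#include <linux/pfn_t.h>

static int example_map_devpage(struct vm_area_struct *vma,
			       unsigned long addr, unsigned long pfn)
{
	/*
	 * PFN_SPECIAL satisfies the vm_mixed_ok() check above and results
	 * in a pte_special() mapping, so get_user_pages() will not pin
	 * the page.
	 */
	return vm_insert_mixed(vma, addr, __pfn_to_pfn_t(pfn, PFN_SPECIAL));
}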