| author | Jérôme Glisse <jglisse@redhat.com> | 2018-04-10 16:28:42 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-04-11 10:28:30 -0700 |
| commit | 855ce7d2525c97cf706ad82a419f0c2d632b9481 (patch) | |
| tree | 4c7cb4c0d14450792fdf512b5eb8e0832080ec8e /mm | |
| parent | ff05c0c6bbe5043af6a1686522ed845f40ba49ee (diff) | |
mm/hmm: cleanup special vma handling (VM_SPECIAL)
A special vma (one with any of the VM_SPECIAL flags set) cannot be accessed by a device, because there is no consistent model across device drivers for such vmas and their backing memory.
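For context, VM_SPECIAL is a mask over several vma flags rather than a single flag. A minimal sketch, assuming the include/linux/mm.h definition of this era; hmm_vma_is_special() is a hypothetical helper naming the check both entry points perform in the diff below:

```c
/* From include/linux/mm.h: a vma with any of these flags has no
 * uniform backing-memory model, so HMM declines to mirror it. */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

/* Hypothetical helper naming the guard both HMM entry points
 * share after this patch (see the diff below): */
static bool hmm_vma_is_special(struct vm_area_struct *vma)
{
	return is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL);
}
```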
This patch passes the hmm_range struct directly to hmm_pfns_special(), since a special vma always affects the whole vma and thus the whole range.
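For reference, a sketch of struct hmm_range as it looked around this kernel (field layout assumed from the include/linux/hmm.h of the time, trimmed to what matters here; treat it as illustrative, not authoritative):

```c
/* Sketch of struct hmm_range circa this series; hmm_pfns_special()
 * only needs start, end and pfns, so passing the struct replaces
 * three separate arguments. */
struct hmm_range {
	struct vm_area_struct	*vma;	/* vma the range lives in */
	struct list_head	list;	/* membership in hmm->ranges */
	unsigned long		start;	/* range start, page aligned */
	unsigned long		end;	/* range end, page aligned */
	uint64_t		*pfns;	/* one entry per page in range */
	bool			valid;	/* false once CPU page table changes */
};
```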
It also makes the behavior consistent: after this patch, both hmm_vma_fault() and hmm_vma_get_pfns() return -EINVAL when facing such a vma. Previously, hmm_vma_fault() returned 0 while hmm_vma_get_pfns() returned -EINVAL, even though both filled the HMM pfn array with the special entry.
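A hedged sketch of what the unified contract means for a driver caller; dmirror_snapshot() and its shape are hypothetical illustration, not part of this patch:

```c
/* Hypothetical caller: with this patch, a special vma makes both
 * entry points fill range->pfns with HMM_PFN_SPECIAL and return
 * -EINVAL, so one error path covers snapshot and fault alike. */
static int dmirror_snapshot(struct hmm_range *range, bool write)
{
	int ret;

	ret = write ? hmm_vma_fault(range, true, false)
		    : hmm_vma_get_pfns(range);
	if (ret)
		/* Previously hmm_vma_fault() returned 0 here and the
		 * driver had to scan pfns for HMM_PFN_SPECIAL. */
		return ret;

	return 0;
}
```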
Link: http://lkml.kernel.org/r/20180323005527.758-10-jglisse@redhat.com
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Cc: Evgeny Baskakov <ebaskakov@nvidia.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Mark Hairgrove <mhairgrove@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/hmm.c | 40
1 file changed, 20 insertions(+), 20 deletions(-)
```diff
@@ -324,14 +324,6 @@ static int hmm_vma_do_fault(struct mm_walk *walk,
 	return -EAGAIN;
 }
 
-static void hmm_pfns_special(uint64_t *pfns,
-			     unsigned long addr,
-			     unsigned long end)
-{
-	for (; addr < end; addr += PAGE_SIZE, pfns++)
-		*pfns = HMM_PFN_SPECIAL;
-}
-
 static int hmm_pfns_bad(unsigned long addr,
 			unsigned long end,
 			struct mm_walk *walk)
@@ -529,6 +521,14 @@ fault:
 	return 0;
 }
 
+static void hmm_pfns_special(struct hmm_range *range)
+{
+	unsigned long addr = range->start, i = 0;
+
+	for (; addr < range->end; addr += PAGE_SIZE, i++)
+		range->pfns[i] = HMM_PFN_SPECIAL;
+}
+
 /*
  * hmm_vma_get_pfns() - snapshot CPU page table for a range of virtual addresses
  * @range: range being snapshotted
@@ -553,12 +553,6 @@ int hmm_vma_get_pfns(struct hmm_range *range)
 	struct mm_walk mm_walk;
 	struct hmm *hmm;
 
-	/* FIXME support hugetlb fs */
-	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
-		hmm_pfns_special(range->pfns, range->start, range->end);
-		return -EINVAL;
-	}
-
 	/* Sanity check, this really should not happen ! */
 	if (range->start < vma->vm_start || range->start >= vma->vm_end)
 		return -EINVAL;
@@ -572,6 +566,12 @@ int hmm_vma_get_pfns(struct hmm_range *range)
 	if (!hmm->mmu_notifier.ops)
 		return -EINVAL;
 
+	/* FIXME support hugetlb fs */
+	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
+		hmm_pfns_special(range);
+		return -EINVAL;
+	}
+
 	if (!(vma->vm_flags & VM_READ)) {
 		/*
 		 * If vma do not allow read access, then assume that it does
@@ -740,6 +740,12 @@ int hmm_vma_fault(struct hmm_range *range, bool write, bool block)
 	if (!hmm->mmu_notifier.ops)
 		return -EINVAL;
 
+	/* FIXME support hugetlb fs */
+	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
+		hmm_pfns_special(range);
+		return -EINVAL;
+	}
+
 	if (!(vma->vm_flags & VM_READ)) {
 		/*
 		 * If vma do not allow read access, then assume that it does
@@ -751,12 +757,6 @@ int hmm_vma_fault(struct hmm_range *range, bool write, bool block)
 		return -EPERM;
 	}
 
-	/* FIXME support hugetlb fs */
-	if (is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) {
-		hmm_pfns_special(range->pfns, range->start, range->end);
-		return 0;
-	}
-
 	/* Initialize range to track CPU page table update */
 	spin_lock(&hmm->lock);
 	range->valid = true;
```