author | Jason Gunthorpe <jgg@mellanox.com> | 2020-03-27 17:00:20 -0300 |
---|---|---|
committer | Jason Gunthorpe <jgg@mellanox.com> | 2020-03-30 16:58:36 -0300 |
commit | 53bfe17ff88faaadf024956e7cb2b295fae7744b (patch) | |
tree | 1f55d991863b7e21a138c11c29abf485a4fa560c /mm/hmm.c | |
parent | 846babe85efdda49feba5b169668333dcf3edf25 (diff) | |
mm/hmm: do not set pfns when returning an error code
Most places that return an error code, like -EFAULT, do not set
HMM_PFN_ERROR; only two places do.
Resolve this inconsistency by never setting the pfns on an error
exit. This doesn't seem like a worthwhile thing to do anyhow.
If for some reason it becomes important, it makes more sense to directly
return the address of the failing page rather than have the caller scan
for the HMM_PFN_ERROR.
No caller inspects the pfns output array if hmm_range_fault() fails.
Link: https://lore.kernel.org/r/20200327200021.29372-9-jgg@ziepe.ca
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
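The practical consequence for hmm_range_fault() users is that a failure is handled entirely from the return code; range->pfns carries no error markers to scan for. The sketch below illustrates that caller-side pattern in kernel C of this era (mmap_sem, mmu_interval_notifier retry protocol). The function name, the retry label, and the surrounding driver logic are illustrative assumptions, not code from this patch.

```c
/*
 * Hypothetical caller sketch, not part of this patch: with this change a
 * failed hmm_range_fault() is handled purely from its return value, and
 * range->pfns is never scanned for HMM_PFN_ERROR.
 */
#include <linux/hmm.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>

static int example_fault_range(struct hmm_range *range, struct mm_struct *mm)
{
	long ret;

again:
	/* typical setup for the mmu_interval_notifier retry protocol */
	range->notifier_seq = mmu_interval_read_begin(range->notifier);

	down_read(&mm->mmap_sem);
	ret = hmm_range_fault(range);
	up_read(&mm->mmap_sem);

	if (ret == -EBUSY)
		goto again;	/* raced with an invalidation, retry */
	if (ret < 0)
		return ret;	/* error: range->pfns was not filled in */

	/* success: consume range->pfns[] here */
	return 0;
}
```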
Diffstat (limited to 'mm/hmm.c')
-rw-r--r-- | mm/hmm.c | 18 |
1 file changed, 3 insertions(+), 15 deletions(-)
```diff
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -77,17 +77,14 @@ static int hmm_vma_fault(unsigned long addr, unsigned long end,
 			 unsigned int required_fault, struct mm_walk *walk)
 {
 	struct hmm_vma_walk *hmm_vma_walk = walk->private;
-	struct hmm_range *range = hmm_vma_walk->range;
 	struct vm_area_struct *vma = walk->vma;
-	uint64_t *pfns = range->pfns;
-	unsigned long i = (addr - range->start) >> PAGE_SHIFT;
 	unsigned int fault_flags = FAULT_FLAG_REMOTE;
 
 	WARN_ON_ONCE(!required_fault);
 	hmm_vma_walk->last = addr;
 
 	if (!vma)
-		goto out_error;
+		return -EFAULT;
 
 	if (required_fault & HMM_NEED_WRITE_FAULT) {
 		if (!(vma->vm_flags & VM_WRITE))
@@ -95,15 +92,10 @@ static int hmm_vma_fault(unsigned long addr, unsigned long end,
 		fault_flags |= FAULT_FLAG_WRITE;
 	}
 
-	for (; addr < end; addr += PAGE_SIZE, i++)
+	for (; addr < end; addr += PAGE_SIZE)
 		if (handle_mm_fault(vma, addr, fault_flags) & VM_FAULT_ERROR)
-			goto out_error;
-
+			return -EFAULT;
 	return -EBUSY;
-
-out_error:
-	pfns[i] = range->values[HMM_PFN_ERROR];
-	return -EFAULT;
 }
 
 static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
@@ -292,7 +284,6 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 
 	/* Report error for everything else */
 	pte_unmap(ptep);
-	*pfn = range->values[HMM_PFN_ERROR];
 	return -EFAULT;
 }
 
@@ -578,9 +569,6 @@ static const struct mm_walk_ops hmm_walk_ops = {
  *
  * This is similar to get_user_pages(), except that it can read the page tables
  * without mutating them (ie causing faults).
- *
- * On error, for one virtual address in the range, the function will mark the
- * corresponding HMM pfn entry with an error flag.
  */
 long hmm_range_fault(struct hmm_range *range)
 {
```
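Reassembled from the first two hunks above, hmm_vma_fault() reads as follows with this patch applied. The `return -EPERM;` line sits in unchanged context between the two hunks and does not appear in the diff, so it is included here as an assumption.

```c
static int hmm_vma_fault(unsigned long addr, unsigned long end,
			 unsigned int required_fault, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;

	WARN_ON_ONCE(!required_fault);
	hmm_vma_walk->last = addr;

	if (!vma)
		return -EFAULT;

	if (required_fault & HMM_NEED_WRITE_FAULT) {
		if (!(vma->vm_flags & VM_WRITE))
			return -EPERM;	/* assumed: context line not shown in the hunks */
		fault_flags |= FAULT_FLAG_WRITE;
	}

	/* every error path now simply returns; no pfn is marked HMM_PFN_ERROR */
	for (; addr < end; addr += PAGE_SIZE)
		if (handle_mm_fault(vma, addr, fault_flags) & VM_FAULT_ERROR)
			return -EFAULT;
	return -EBUSY;
}
```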