author | Jan Kara <jack@suse.cz> | 2016-12-14 15:07:33 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-12-14 16:04:09 -0800
commit | a41b70d6dfc28b9e1a17c2a9f3181c2b614bfd54
tree | 55ca53d3e7f49f9c58a5e58ce08c7b1be6fdc313 /mm
parent | 38b8cb7fbb892503fe9fcf748ebbed8c9fde7bf8
mm: use vmf->page during WP faults
So far we set vmf->page during WP faults only when we needed to pass it
to the ->page_mkwrite handler. Set it in all cases now and use it
instead of passing the page pointer around explicitly.
Link: http://lkml.kernel.org/r/1479460644-25076-14-git-send-email-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
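The change described above is purely a calling-convention cleanup: instead of threading a `struct page *` argument through the write-protect fault helpers, `do_wp_page()` records the page it resolves in `vmf->page` and the helpers read it from there. Below is a minimal, self-contained C sketch of that pattern; the types and functions are simplified stand-ins invented for illustration (not the kernel's real `struct vm_fault`, `wp_page_reuse()` or `wp_page_copy()`), intended only to show the before/after shape of the interface.

```c
#include <stdio.h>

/* Simplified stand-ins for the kernel's struct page and struct vm_fault. */
struct page {
	unsigned long pfn;
};

struct vm_fault {
	unsigned long address;
	struct page *page;	/* page resolved by the fault handler, if any */
};

/* Old convention: the page is threaded through as an extra argument. */
static int wp_page_reuse_old(struct vm_fault *vmf, struct page *page)
{
	(void)vmf;
	printf("reuse pfn %lu (explicit argument)\n", page->pfn);
	return 0;
}

/* New convention: the helper reads the page from vmf->page. */
static int wp_page_reuse_new(struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	printf("reuse pfn %lu (from vmf->page)\n", page->pfn);
	return 0;
}

static int do_wp_page_demo(struct vm_fault *vmf, struct page *resolved)
{
	/* Before: keep a local pointer and pass it to every helper. */
	wp_page_reuse_old(vmf, resolved);

	/* After: set vmf->page once; any later helper can reach it. */
	vmf->page = resolved;
	return wp_page_reuse_new(vmf);
}

int main(void)
{
	struct page p = { .pfn = 42 };
	struct vm_fault vmf = { .address = 0x1000, .page = NULL };

	return do_wp_page_demo(&vmf, &p);
}
```

The payoff is the same as in the patch below: once the fault handler has stashed the page in the fault structure, later callees such as do_page_mkwrite() no longer need it handed to them, which is why the explicit `vmf->page = old_page;` assignment in wp_page_shared() disappears.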
Diffstat (limited to 'mm')
-rw-r--r-- | mm/memory.c | 58
1 file changed, 29 insertions, 29 deletions
diff --git a/mm/memory.c b/mm/memory.c
index e8a527885e8b..ad452898e6c0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2102,11 +2102,12 @@ static void fault_dirty_shared_page(struct vm_area_struct *vma,
  * case, all we need to do here is to mark the page as writable and update
  * any related book-keeping.
  */
-static inline int wp_page_reuse(struct vm_fault *vmf, struct page *page,
+static inline int wp_page_reuse(struct vm_fault *vmf,
 				int page_mkwrite, int dirty_shared)
 	__releases(vmf->ptl)
 {
 	struct vm_area_struct *vma = vmf->vma;
+	struct page *page = vmf->page;
 	pte_t entry;
 	/*
 	 * Clear the pages cpupid information as the existing
@@ -2150,10 +2151,11 @@ static inline int wp_page_reuse(struct vm_fault *vmf, struct page *page,
  *   held to the old page, as well as updating the rmap.
  * - In any case, unlock the PTL and drop the reference we took to the old page.
  */
-static int wp_page_copy(struct vm_fault *vmf, struct page *old_page)
+static int wp_page_copy(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	struct mm_struct *mm = vma->vm_mm;
+	struct page *old_page = vmf->page;
 	struct page *new_page = NULL;
 	pte_t entry;
 	int page_copied = 0;
@@ -2305,26 +2307,25 @@ static int wp_page_copy(struct vm_fault *vmf, struct page *old_page)
 			return 0;
 		}
 	}
-	return wp_page_reuse(vmf, NULL, 0, 0);
+	return wp_page_reuse(vmf, 0, 0);
 }
 
-static int wp_page_shared(struct vm_fault *vmf, struct page *old_page)
+static int wp_page_shared(struct vm_fault *vmf)
 	__releases(vmf->ptl)
 {
 	struct vm_area_struct *vma = vmf->vma;
 	int page_mkwrite = 0;
 
-	get_page(old_page);
+	get_page(vmf->page);
 
 	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
 		int tmp;
 
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
-		vmf->page = old_page;
 		tmp = do_page_mkwrite(vmf);
 		if (unlikely(!tmp || (tmp &
 				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
-			put_page(old_page);
+			put_page(vmf->page);
 			return tmp;
 		}
 		/*
@@ -2336,15 +2337,15 @@ static int wp_page_shared(struct vm_fault *vmf, struct page *old_page)
 		vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
 						vmf->address, &vmf->ptl);
 		if (!pte_same(*vmf->pte, vmf->orig_pte)) {
-			unlock_page(old_page);
+			unlock_page(vmf->page);
 			pte_unmap_unlock(vmf->pte, vmf->ptl);
-			put_page(old_page);
+			put_page(vmf->page);
 			return 0;
 		}
 		page_mkwrite = 1;
 	}
 
-	return wp_page_reuse(vmf, old_page, page_mkwrite, 1);
+	return wp_page_reuse(vmf, page_mkwrite, 1);
 }
 
 /*
@@ -2369,10 +2370,9 @@ static int do_wp_page(struct vm_fault *vmf)
 	__releases(vmf->ptl)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct page *old_page;
 
-	old_page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
-	if (!old_page) {
+	vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
+	if (!vmf->page) {
 		/*
 		 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
 		 * VM_PFNMAP VMA.
@@ -2385,30 +2385,30 @@ static int do_wp_page(struct vm_fault *vmf)
 			return wp_pfn_shared(vmf);
 
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
-		return wp_page_copy(vmf, old_page);
+		return wp_page_copy(vmf);
 	}
 
 	/*
 	 * Take out anonymous pages first, anonymous shared vmas are
 	 * not dirty accountable.
 	 */
-	if (PageAnon(old_page) && !PageKsm(old_page)) {
+	if (PageAnon(vmf->page) && !PageKsm(vmf->page)) {
 		int total_mapcount;
-		if (!trylock_page(old_page)) {
-			get_page(old_page);
+		if (!trylock_page(vmf->page)) {
+			get_page(vmf->page);
 			pte_unmap_unlock(vmf->pte, vmf->ptl);
-			lock_page(old_page);
+			lock_page(vmf->page);
 			vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
 					vmf->address, &vmf->ptl);
 			if (!pte_same(*vmf->pte, vmf->orig_pte)) {
-				unlock_page(old_page);
+				unlock_page(vmf->page);
 				pte_unmap_unlock(vmf->pte, vmf->ptl);
-				put_page(old_page);
+				put_page(vmf->page);
 				return 0;
 			}
-			put_page(old_page);
+			put_page(vmf->page);
 		}
-		if (reuse_swap_page(old_page, &total_mapcount)) {
+		if (reuse_swap_page(vmf->page, &total_mapcount)) {
 			if (total_mapcount == 1) {
 				/*
 				 * The page is all ours. Move it to
@@ -2417,24 +2417,24 @@ static int do_wp_page(struct vm_fault *vmf)
 				 * Protected against the rmap code by
 				 * the page lock.
 				 */
-				page_move_anon_rmap(old_page, vma);
+				page_move_anon_rmap(vmf->page, vma);
 			}
-			unlock_page(old_page);
-			return wp_page_reuse(vmf, old_page, 0, 0);
+			unlock_page(vmf->page);
+			return wp_page_reuse(vmf, 0, 0);
 		}
-		unlock_page(old_page);
+		unlock_page(vmf->page);
 	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
 					(VM_WRITE|VM_SHARED))) {
-		return wp_page_shared(vmf, old_page);
+		return wp_page_shared(vmf);
 	}
 
 	/*
 	 * Ok, we need to copy. Oh, well..
 	 */
-	get_page(old_page);
+	get_page(vmf->page);
 
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
-	return wp_page_copy(vmf, old_page);
+	return wp_page_copy(vmf);
 }
 
 static void unmap_mapping_range_vma(struct vm_area_struct *vma,