From 8a966ed746d63c8103d496da85973eeeec01d77f Mon Sep 17 00:00:00 2001
From: Ebru Akagunduz
Date: Tue, 26 Jul 2016 15:25:03 -0700
Subject: mm: make swapin readahead to improve thp collapse rate

This patch adds swapin readahead to improve the THP collapse rate.
When khugepaged scans pages, a few of the pages in the scanned range
may reside in the swap area.

With the patch, khugepaged can collapse 4kB pages into a THP even when
there are up to max_ptes_swap swap PTEs in the 2MB range.

The patch was tested with a test program that allocates 400B of memory,
writes to it, and then sleeps.  The system is then forced to swap
everything out.  Afterwards, the test program touches the area again by
writing, skipping one page in every 20 pages of the area.

Without the patch, the system did not perform swapin readahead; THP
covered 65% of the program's memory, and that did not change over time.

With this patch, after 10 minutes of waiting, khugepaged had collapsed
99% of the program's memory.

[kirill.shutemov@linux.intel.com: trivial cleanup of exit path of the function]
[kirill.shutemov@linux.intel.com: __collapse_huge_page_swapin(): drop unused 'pte' parameter]
[kirill.shutemov@linux.intel.com: do not hold anon_vma lock during swap in]
Signed-off-by: Ebru Akagunduz
Acked-by: Rik van Riel
Cc: Naoya Horiguchi
Cc: Andrea Arcangeli
Cc: Joonsoo Kim
Cc: Xie XiuQi
Cc: Cyrill Gorcunov
Cc: Mel Gorman
Cc: David Rientjes
Cc: Vlastimil Babka
Cc: Aneesh Kumar K.V
Cc: Hugh Dickins
Cc: Johannes Weiner
Cc: Michal Hocko
Signed-off-by: Kirill A. Shutemov
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/huge_memory.c | 43 ++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 40 insertions(+), 3 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index ed474483a620..b11351579e7a 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2373,6 +2373,44 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
 	return !(vma->vm_flags & VM_NO_THP);
 }
 
+/*
+ * Bring missing pages in from swap, to complete THP collapse.
+ * Only done if khugepaged_scan_pmd believes it is worthwhile.
+ *
+ * Called and returns without pte mapped or spinlocks held,
+ * but with mmap_sem held to protect against vma changes.
+ */
+
+static void __collapse_huge_page_swapin(struct mm_struct *mm,
+					struct vm_area_struct *vma,
+					unsigned long address, pmd_t *pmd)
+{
+	unsigned long _address;
+	pte_t *pte, pteval;
+	int swapped_in = 0, ret = 0;
+
+	pte = pte_offset_map(pmd, address);
+	for (_address = address; _address < address + HPAGE_PMD_NR*PAGE_SIZE;
+	     pte++, _address += PAGE_SIZE) {
+		pteval = *pte;
+		if (!is_swap_pte(pteval))
+			continue;
+		swapped_in++;
+		ret = do_swap_page(mm, vma, _address, pte, pmd,
+				   FAULT_FLAG_ALLOW_RETRY|FAULT_FLAG_RETRY_NOWAIT,
+				   pteval);
+		if (ret & VM_FAULT_ERROR) {
+			trace_mm_collapse_huge_page_swapin(mm, swapped_in, 0);
+			return;
+		}
+		/* pte is unmapped now, we need to map it */
+		pte = pte_offset_map(pmd, _address);
+	}
+	pte--;
+	pte_unmap(pte);
+	trace_mm_collapse_huge_page_swapin(mm, swapped_in, 1);
+}
+
 static void collapse_huge_page(struct mm_struct *mm,
 				   unsigned long address,
 				   struct page **hpage,
@@ -2440,6 +2478,8 @@ static void collapse_huge_page(struct mm_struct *mm,
 		goto out;
 	}
 
+	__collapse_huge_page_swapin(mm, vma, address, pmd);
+
 	anon_vma_lock_write(vma->anon_vma);
 
 	pte = pte_offset_map(pmd, address);
@@ -2516,9 +2556,6 @@ static void collapse_huge_page(struct mm_struct *mm,
 	result = SCAN_SUCCEED;
 out_up_write:
 	up_write(&mm->mmap_sem);
-	trace_mm_collapse_huge_page(mm, isolated, result);
-	return;
-
 out_nolock:
 	trace_mm_collapse_huge_page(mm, isolated, result);
 	return;
--
cgit v1.2.3
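
For reference, below is a minimal, hypothetical sketch of a userspace
test program along the lines of the one described in the commit message.
The actual test program is not part of the patch; the 400MB mapping size,
the MADV_HUGEPAGE hint, the sleep duration, and the skip-every-20th-page
interval are all assumptions made for illustration.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

#define SIZE		(400UL << 20)	/* assumed region size: 400MB */
#define PAGE_SZ		4096UL
#define SKIP_EVERY	20		/* leave one page in each 20 swapped out */

int main(void)
{
	char *area;
	unsigned long i;

	area = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/* Hint that this region is a THP collapse candidate. */
	madvise(area, SIZE, MADV_HUGEPAGE);

	/* Dirty every page so the whole region is resident. */
	memset(area, 1, SIZE);

	/* Sleep while memory pressure, applied externally, swaps it out. */
	sleep(600);

	/* Touch the area again by writing, skipping a page in each 20. */
	for (i = 0; i < SIZE / PAGE_SZ; i++) {
		if (i % SKIP_EVERY == 0)
			continue;
		area[i * PAGE_SZ] = 1;
	}

	/* Keep the mapping alive so khugepaged has time to collapse it. */
	pause();
	return 0;
}

Swap-out pressure has to be applied externally while the program sleeps,
for example with a memory hog or a cgroup memory limit.  THP coverage of
the region can then be watched via the AnonHugePages field in
/proc/<pid>/smaps: without swapin readahead the skipped (swapped-out)
pages leave swap PTEs in each 2MB range and block collapse; with it,
khugepaged faults those pages back in and collapses the range anyway.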