author		Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2016-03-17 14:20:10 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-03-17 15:09:34 -0700
commit		fec89c109f3a7737fe3a7bf0f40d1fb7709d353b (patch)
tree		be5843c06b4185106ae2acbfbe02b6ea7494d28a /include/linux/huge_mm.h
parent		e388466de4a2a1a50c43bfaeacc0c8254d9e7cb2 (diff)
thp: rewrite freeze_page()/unfreeze_page() with generic rmap walkers
The freeze_page() and unfreeze_page() helpers have evolved into rather complex beasts. It would be nice to cut the complexity of this code.

This patch rewrites freeze_page() using the standard try_to_unmap(). unfreeze_page() is rewritten with remove_migration_ptes().

The result is much simpler, but the new variant is somewhat slower for PTE-mapped THPs. The current helpers iterate over the VMAs the compound page is mapped into, and then over the ptes within each VMA. The new helpers iterate over each small page, then over each VMA the small page is mapped into, and only then find the relevant pte. We have a shortcut for PMD-mapped THP: we directly install migration entries on PMD split.

I don't think the slowdown is critical, considering how much simpler the result is and that split_huge_page() is quite rare nowadays. It only happens due to memory pressure or migration.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
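For orientation, here is a minimal sketch of what the rewritten helpers look like when built on the generic rmap walkers named above. The shape follows the changelog (try_to_unmap() to freeze, remove_migration_ptes() to unfreeze); the exact TTU_* flag set shown is an assumption for illustration, not quoted from the patch.

/*
 * Sketch: freeze the page by replacing its ptes with migration
 * entries via the standard rmap walker. TTU_SPLIT_HUGE_PMD takes the
 * PMD-mapped shortcut from the changelog: migration entries are
 * installed directly at PMD split, so only the first call needs it.
 */
static void freeze_page(struct page *page)
{
	enum ttu_flags ttu_flags = TTU_MIGRATION | TTU_IGNORE_MLOCK |
				   TTU_IGNORE_ACCESS;	/* assumed flag set */
	int i, ret;

	VM_BUG_ON_PAGE(!PageHead(page), page);

	ret = try_to_unmap(page, ttu_flags | TTU_SPLIT_HUGE_PMD);

	/*
	 * PTE-mapped THP: go small page by small page; each call
	 * iterates over the VMAs that small page is mapped into and
	 * only then finds the relevant pte -- the slower but simpler
	 * path discussed above.
	 */
	for (i = 1; !ret && i < HPAGE_PMD_NR; i++)
		ret = try_to_unmap(page + i, ttu_flags);

	VM_BUG_ON(ret);
}

/* Sketch: unfreezing is one remove_migration_ptes() per small page. */
static void unfreeze_page(struct page *page)
{
	int i;

	for (i = 0; i < HPAGE_PMD_NR; i++)
		remove_migration_ptes(page + i, page + i, true);
}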
Diffstat (limited to 'include/linux/huge_mm.h')
-rw-r--r--	include/linux/huge_mm.h	10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 01ad22e938b0..5307dfb3f8ec 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -96,18 +96,20 @@ static inline int split_huge_page(struct page *page)
 void deferred_split_huge_page(struct page *page);
 
 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-		unsigned long address);
+		unsigned long address, bool freeze);
 
 #define split_huge_pmd(__vma, __pmd, __address)				\
 	do {								\
 		pmd_t *____pmd = (__pmd);				\
 		if (pmd_trans_huge(*____pmd)				\
 					|| pmd_devmap(*____pmd))	\
-			__split_huge_pmd(__vma, __pmd, __address);	\
+			__split_huge_pmd(__vma, __pmd, __address,	\
+						false);			\
 	} while (0)
 
-void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address);
+void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
+		bool freeze, struct page *page);
 
 #if HPAGE_PMD_ORDER >= MAX_ORDER
 #error "hugepages can't be allocated by the buddy allocator"
@@ -178,7 +180,7 @@ static inline void deferred_split_huge_page(struct page *page) {}
 	do { } while (0)
 
 static inline void split_huge_pmd_address(struct vm_area_struct *vma,
-		unsigned long address) {}
+		unsigned long address, bool freeze, struct page *page) {}
 
 static inline int hugepage_madvise(struct vm_area_struct *vma,
 				   unsigned long *vm_flags, int advice)
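To tie the hunks together, a hedged sketch of how the new freeze argument flows. The split_huge_pmd() macro above passes freeze=false, keeping the old behaviour for ordinary callers; the freezing call site below is illustrative and not part of this hunk.

	/* Ordinary split: the macro expands to freeze=false. */
	split_huge_pmd(vma, pmd, address);

	/*
	 * Freeze path (sketch): ask for migration entries to be
	 * installed directly at PMD split time -- the PMD-mapped
	 * shortcut from the changelog. The page argument lets the
	 * callee check that it is splitting the mapping of this page.
	 */
	split_huge_pmd_address(vma, address, true, page);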