-rw-r--r--   include/linux/ksm.h | 20
-rw-r--r--   mm/ksm.c            | 14
-rw-r--r--   mm/mmap.c           |  1
3 files changed, 13 insertions, 22 deletions
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 0e26de6adb51..a485c14ecd5d 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -12,8 +12,6 @@
 #include <linux/sched.h>
 #include <linux/vmstat.h>
 
-struct mmu_gather;
-
 #ifdef CONFIG_KSM
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 		unsigned long end, int advice, unsigned long *vm_flags);
@@ -27,19 +25,6 @@ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 	return 0;
 }
 
-/*
- * For KSM to handle OOM without deadlock when it's breaking COW in a
- * likely victim of the OOM killer, exit_mmap() has to serialize with
- * ksm_exit() after freeing mm's pages but before freeing its page tables.
- * That leaves a window in which KSM might refault pages which have just
- * been finally unmapped: guard against that with ksm_test_exit(), and
- * use it after getting mmap_sem in ksm.c, to check if mm is exiting.
- */
-static inline bool ksm_test_exit(struct mm_struct *mm)
-{
-	return atomic_read(&mm->mm_users) == 0;
-}
-
 static inline void ksm_exit(struct mm_struct *mm)
 {
 	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
@@ -79,11 +64,6 @@ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 	return 0;
 }
 
-static inline bool ksm_test_exit(struct mm_struct *mm)
-{
-	return 0;
-}
-
 static inline void ksm_exit(struct mm_struct *mm)
 {
 }
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -32,7 +32,6 @@
 #include <linux/mmu_notifier.h>
 #include <linux/ksm.h>
 
-#include <asm/tlb.h>
 #include <asm/tlbflush.h>
 
 /*
@@ -285,6 +284,19 @@ static inline int in_stable_tree(struct rmap_item *rmap_item)
 }
 
 /*
+ * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
+ * page tables after it has passed through ksm_exit() - which, if necessary,
+ * takes mmap_sem briefly to serialize against them. ksm_exit() does not set
+ * a special flag: they can just back out as soon as mm_users goes to zero.
+ * ksm_test_exit() is used throughout to make this test for exit: in some
+ * places for correctness, in some places just to avoid unnecessary work.
+ */
+static inline bool ksm_test_exit(struct mm_struct *mm)
+{
+	return atomic_read(&mm->mm_users) == 0;
+}
+
+/*
  * We use break_ksm to break COW on a ksm page: it's a stripped down
  *
  *	if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1)
diff --git a/mm/mmap.c b/mm/mmap.c
index 22dff49d579e..6eed98c00543 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -27,7 +27,6 @@
 #include <linux/mount.h>
 #include <linux/mempolicy.h>
 #include <linux/rmap.h>
-#include <linux/ksm.h>
 #include <linux/mmu_notifier.h>
 #include <linux/perf_event.h>
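
The new comment moved into mm/ksm.c describes the locking rule the KSM scanners follow. As a rough illustration only (ksm_scan_one_mm() is a made-up name, not code from this patch), the pattern after taking mmap_sem looks like this:

/*
 * Sketch only: ksm_scan_one_mm() is a hypothetical helper standing in for
 * the real scan/unmerge paths in mm/ksm.c. The point is the ordering: take
 * mmap_sem, re-check ksm_test_exit(), and back out without touching the
 * mm's page tables once mm_users has dropped to zero.
 */
static int ksm_scan_one_mm(struct mm_struct *mm)
{
	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm)) {
		/* mm has passed through ksm_exit(): do nothing further */
		up_read(&mm->mmap_sem);
		return 0;
	}

	/* ... walk VMAs and try to merge or unmerge pages here ... */

	up_read(&mm->mmap_sem);
	return 0;
}

Because the check only reads mm_users, no extra flag has to be set by ksm_exit(); callers that already hold mmap_sem simply observe the count reaching zero and back out.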