From 97500a4a54876d3d6d2d1b8419223eb4e69b32d8 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Tue, 14 May 2019 15:41:35 -0700
Subject: mm: maintain randomization of page free lists

When freeing a page with an order >= shuffle_page_order, randomly select
the front or back of the list for insertion.

While the mm tries to defragment physical pages into huge pages, this can
tend to make the page allocator more predictable over time.  Inject the
front-back randomness to preserve the initial randomness established by
shuffle_free_memory() when the kernel was booted.

The overhead of this manipulation is constrained by only being applied
for MAX_ORDER sized pages by default.

[akpm@linux-foundation.org: coding-style fixes]
Link: http://lkml.kernel.org/r/154899812788.3165233.9066631950746578517.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams
Reviewed-by: Kees Cook
Cc: Michal Hocko
Cc: Dave Hansen
Cc: Keith Busch
Cc: Robert Elliott
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/shuffle.h | 12 ++++++++++++
 1 file changed, 12 insertions(+)

(limited to 'mm/shuffle.h')

diff --git a/mm/shuffle.h b/mm/shuffle.h
index 644c8ee97b9e..777a257a0d2f 100644
--- a/mm/shuffle.h
+++ b/mm/shuffle.h
@@ -36,6 +36,13 @@ static inline void shuffle_zone(struct zone *z)
 		return;
 	__shuffle_zone(z);
 }
+
+static inline bool is_shuffle_order(int order)
+{
+	if (!static_branch_unlikely(&page_alloc_shuffle_key))
+		return false;
+	return order >= SHUFFLE_ORDER;
+}
 #else
 static inline void shuffle_free_memory(pg_data_t *pgdat)
 {
@@ -48,5 +55,10 @@ static inline void shuffle_zone(struct zone *z)
 static inline void page_alloc_shuffle(enum mm_shuffle_ctl ctl)
 {
 }
+
+static inline bool is_shuffle_order(int order)
+{
+	return false;
+}
 #endif
 #endif /* _MM_SHUFFLE_H */
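
For context, the free path is expected to consume is_shuffle_order() by
picking a random end of the per-order free list when inserting a freed
page.  The following is a minimal userspace sketch of that idea, not the
kernel implementation: struct page, struct free_area, the list helpers,
free_page_to_area(), and rand_bit() are all simplified, hypothetical
stand-ins for the kernel's own structures and batched entropy handling.

/*
 * Sketch of randomized free-list insertion, assuming one circular
 * doubly-linked free list per order.  All names below are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define SHUFFLE_ORDER 10	/* stand-in for the patch's default threshold */

struct page {
	int id;
	struct page *prev, *next;
};

struct free_area {
	struct page head;	/* circular list sentinel */
};

static bool is_shuffle_order(int order)
{
	return order >= SHUFFLE_ORDER;
}

/* Batch random bits so the common case is a shift, not an RNG call. */
static bool rand_bit(void)
{
	static long bits;
	static int nbits;

	if (nbits == 0) {
		bits = random();
		nbits = 31;	/* random() yields 31 usable bits */
	}
	nbits--;
	bool bit = bits & 1;
	bits >>= 1;
	return bit;
}

static void list_add(struct page *p, struct page *head)	/* front */
{
	p->next = head->next;
	p->prev = head;
	head->next->prev = p;
	head->next = p;
}

static void list_add_tail(struct page *p, struct page *head)	/* back */
{
	p->prev = head->prev;
	p->next = head;
	head->prev->next = p;
	head->prev = p;
}

/* On free, shuffle-eligible orders land at a random end of the list. */
static void free_page_to_area(struct page *p, struct free_area *area, int order)
{
	if (is_shuffle_order(order) && rand_bit())
		list_add_tail(p, &area->head);
	else
		list_add(p, &area->head);
}

int main(void)
{
	struct free_area area = {
		.head = { .prev = &area.head, .next = &area.head },
	};
	struct page pages[8];

	srandom(42);
	for (int i = 0; i < 8; i++) {
		pages[i].id = i;
		free_page_to_area(&pages[i], &area, SHUFFLE_ORDER);
	}

	/* Walk from the front: order is no longer strictly LIFO. */
	for (struct page *p = area.head.next; p != &area.head; p = p->next)
		printf("%d ", p->id);
	printf("\n");
	return 0;
}

Because the coin flip only runs for order >= SHUFFLE_ORDER, low-order
frees (the hot path) pay nothing, which matches the commit message's
point that the overhead is confined to MAX_ORDER sized pages by default.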