author	Muchun Song <songmuchun@bytedance.com>	2020-12-14 19:11:25 -0800
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2021-09-22 11:42:57 +0200
commit	c88c1fd8861ea33933917b5d4ed3c1d167cb8118 (patch)
tree	87e51c26c074736df8012fbed2e463673cfc3016 /mm
parent	43b225cb64c260023bb4b7f511317c468e3259e1 (diff)
mm/page_alloc: speed up the iteration of max_order
commit 7ad69832f37e3cea8557db6df7c793905f1135e8 upstream.

When we free a page whose order is very close to MAX_ORDER and greater than pageblock_order, it wastes some CPU cycles to increase max_order to MAX_ORDER one by one and to check the pageblock migratetype of that page repeatedly, especially when MAX_ORDER is much larger than pageblock_order.

We also should not be checking the migratetype of the buddy when "order == MAX_ORDER - 1", as the buddy pfn may be invalid, so adjust the condition. With the new check, we don't need the max_order check anymore, so we replace it.

Also adjust the max_order initialization so that it is lower by one than previously, which hopefully makes the code clearer.

Link: https://lkml.kernel.org/r/20201204155109.55451-1-songmuchun@bytedance.com
Fixes: d9dddbf55667 ("mm/page_alloc: prevent merging between isolated and other pageblocks")
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
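To make the saving concrete, below is a minimal, self-contained user-space sketch (plain C, not kernel code) that only counts how often the retry step, where the pageblock migratetype may be re-tested, runs when a high-order page with no buddy to merge with is freed. The MAX_ORDER = 20 and pageblock_order = 9 values, and the helper names old_migratetype_checks()/new_migratetype_checks(), are illustrative assumptions and do not come from the patch.

/*
 * Simplified user-space model of the max_order walk in __free_one_page().
 * It only counts how many times the merge loop is re-entered via the
 * max_order bump (the step at which the pageblock migratetype may be
 * re-checked) when the freed page has no buddy to merge with.
 * MAX_ORDER/pageblock_order below are assumed values for illustration.
 */
#include <stdio.h>

#define MAX_ORDER	20	/* assumed, for illustration */
#define pageblock_order	9	/* assumed, for illustration */

static unsigned int min_uint(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Old scheme: start at pageblock_order + 1 and bump max_order one by one. */
static unsigned int old_migratetype_checks(unsigned int order)
{
	unsigned int max_order = min_uint(MAX_ORDER, pageblock_order + 1);
	unsigned int checks = 0;

	(void)order;	/* with no buddy to merge, order never changes */
	while (max_order < MAX_ORDER) {
		checks++;	/* retry step: migratetype may be re-tested */
		max_order++;	/* old: advance by a single step and retry */
	}
	return checks;
}

/* New scheme: one check, then jump max_order straight past the page's order. */
static unsigned int new_migratetype_checks(unsigned int order)
{
	unsigned int max_order = min_uint(MAX_ORDER - 1, pageblock_order);
	unsigned int checks = 0;

	while (order < MAX_ORDER - 1 && max_order <= order) {
		checks++;		/* retry step runs at most once here */
		max_order = order + 1;	/* new: a single jump, then stop */
	}
	return checks;
}

int main(void)
{
	unsigned int order = 18;	/* close to MAX_ORDER, above pageblock_order */

	printf("order %u: old scheme %u checks, new scheme %u check(s)\n",
	       order, old_migratetype_checks(order),
	       new_migratetype_checks(order));
	return 0;
}

With these assumed values and order = 18, the old walk takes ten retry steps while the new one takes a single step.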
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b0c451e3b59f..babcbd8b94ea 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -814,7 +814,7 @@ static inline void __free_one_page(struct page *page,
 	struct page *buddy;
 	unsigned int max_order;
 
-	max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
+	max_order = min_t(unsigned int, MAX_ORDER - 1, pageblock_order);
 
 	VM_BUG_ON(!zone_is_initialized(zone));
 	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
@@ -829,7 +829,7 @@ static inline void __free_one_page(struct page *page,
 	VM_BUG_ON_PAGE(bad_range(zone, page), page);
 
 continue_merging:
-	while (order < max_order - 1) {
+	while (order < max_order) {
 		buddy_idx = __find_buddy_index(page_idx, order);
 		buddy = page + (buddy_idx - page_idx);
 		if (!page_is_buddy(page, buddy, order))
@@ -850,7 +850,7 @@ continue_merging:
 		page_idx = combined_idx;
 		order++;
 	}
-	if (max_order < MAX_ORDER) {
+	if (order < MAX_ORDER - 1) {
 		/* If we are here, it means order is >= pageblock_order.
 		 * We want to prevent merge between freepages on isolate
 		 * pageblock and normal pageblock. Without this, pageblock
@@ -871,7 +871,7 @@ continue_merging:
 						is_migrate_isolate(buddy_mt)))
 				goto done_merging;
 		}
-		max_order++;
+		max_order = order + 1;
 		goto continue_merging;
 	}
 