author     Mel Gorman <mel@csn.ul.ie>                      2009-06-16 15:32:07 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-06-16 19:47:34 -0700
commit     ed0ae21dc5fe3b9ad4cf1c7bb2bfd2ad596c481c (patch)
tree       1ccdf36012a6a8ed22c5a78f7093bae0a259274e /mm
parent     0ac3a4099b0171ff965836182bc688bb8ca01058 (diff)
page allocator: do not call get_pageblock_migratetype() more than necessary
get_pageblock_migratetype() is potentially called twice for every page free: once when the page is freed to the pcp lists, and once when it is freed back to the buddy allocator. When freeing from the pcp lists, the pageblock type at the time of the free is already known, so use it rather than rechecking. In low-memory situations under memory pressure, this might skew anti-fragmentation slightly, but the interference is minimal and the decisions that fragment memory are being made anyway.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
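[Editor's note] To make the shape of the change concrete, here is a minimal userspace sketch of the caching pattern the patch applies. This is not kernel code: toy_page, classify(), pcp_free() and pcp_drain() are invented stand-ins for struct page, get_pageblock_migratetype(), the pcp-free path and free_pages_bulk() respectively.

/* Toy model: compute a page's classification once at enqueue time,
 * cache it in a private field, and reuse it at dequeue time instead
 * of recomputing. All names here are illustrative only. */
#include <assert.h>
#include <stdio.h>

struct toy_page {
	unsigned long pfn;
	int private;		/* mirrors page_private(): caches the type */
};

static int classify_calls;

/* Stand-in for get_pageblock_migratetype(): pretend it is expensive. */
static int classify(const struct toy_page *page)
{
	classify_calls++;
	return (int)(page->pfn >> 10) & 3;	/* arbitrary demo classification */
}

/* After the patch, the equivalent of __free_one_page() no longer
 * recomputes the type; the caller passes the value it already knows. */
static void buddy_free(struct toy_page *page, int migratetype)
{
	assert(migratetype != -1);		/* mirrors the new VM_BUG_ON() */
	printf("freeing pfn %lu to free list %d\n", page->pfn, migratetype);
}

/* Freeing to the pcp list: the one place the type is computed. */
static void pcp_free(struct toy_page *page)
{
	page->private = classify(page);		/* like set_page_private() */
}

/* Draining the pcp list back to buddy: reuse the cached value. */
static void pcp_drain(struct toy_page *page)
{
	buddy_free(page, page->private);	/* like page_private(page) */
}

int main(void)
{
	struct toy_page page = { .pfn = 4096, .private = -1 };

	pcp_free(&page);
	pcp_drain(&page);
	printf("classify() called %d time(s)\n", classify_calls);	/* 1, not 2 */
	return 0;
}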
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c  16
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 91e29b3ed2b6..8f334d339b08 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -452,16 +452,18 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
  */
 
 static inline void __free_one_page(struct page *page,
-		struct zone *zone, unsigned int order)
+		struct zone *zone, unsigned int order,
+		int migratetype)
 {
 	unsigned long page_idx;
 	int order_size = 1 << order;
-	int migratetype = get_pageblock_migratetype(page);
 
 	if (unlikely(PageCompound(page)))
 		if (unlikely(destroy_compound_page(page, order)))
 			return;
 
+	VM_BUG_ON(migratetype == -1);
+
 	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
 
 	VM_BUG_ON(page_idx & (order_size - 1));
@@ -530,17 +532,18 @@ static void free_pages_bulk(struct zone *zone, int count,
 		page = list_entry(list->prev, struct page, lru);
 		/* have to delete it as __free_one_page list manipulates */
 		list_del(&page->lru);
-		__free_one_page(page, zone, order);
+		__free_one_page(page, zone, order, page_private(page));
 	}
 	spin_unlock(&zone->lock);
 }
 
-static void free_one_page(struct zone *zone, struct page *page, int order)
+static void free_one_page(struct zone *zone, struct page *page, int order,
+				int migratetype)
 {
 	spin_lock(&zone->lock);
 	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
 	zone->pages_scanned = 0;
-	__free_one_page(page, zone, order);
+	__free_one_page(page, zone, order, migratetype);
 	spin_unlock(&zone->lock);
 }
 
@@ -565,7 +568,8 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 
 	local_irq_save(flags);
 	__count_vm_events(PGFREE, 1 << order);
-	free_one_page(page_zone(page), page, order);
+	free_one_page(page_zone(page), page, order,
+				get_pageblock_migratetype(page));
 	local_irq_restore(flags);
 }