author     Mel Gorman <mgorman@suse.de>   2012-11-12 09:06:20 +0000
committer  Mel Gorman <mgorman@suse.de>   2012-12-11 14:42:52 +0000
commit     57e0a0309160b1b4ebde9f3c6a867cd96ac368bf (patch)
tree       085278ae65303221b638bf4d5063be0d41d35098
parent     fb003b80daa0dead5b87f4e2e4fb8da68b110ff2 (diff)
mm: numa: Introduce last_nid to the page frame
This patch introduces a last_nid field to the page struct. This is used
to build a two-stage filter in the next patch that is aimed at
mitigating a problem whereby pages migrate to the wrong node when
referenced by a process that was running off its home node.
Signed-off-by: Mel Gorman <mgorman@suse.de>
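
The two-stage filter the message refers to lands in the follow-up patch; roughly, a page is only migrated once two consecutive NUMA hinting faults see it from the same node. Below is a minimal userspace sketch of that idea, not the kernel code: struct toy_page, toy_page_xchg_last_nid() and should_migrate() are illustrative stand-ins for struct page, page_xchg_last_nid() and the filter in the next patch, and GCC's __atomic_exchange_n() stands in for the kernel's xchg().

#include <stdio.h>

/* Toy stand-in for struct page with the _last_nid field this patch adds. */
struct toy_page {
	int _last_nid;
};

/* Mirrors page_xchg_last_nid(): atomically record the faulting node and
 * return the node recorded by the previous fault (-1 if none yet). */
static int toy_page_xchg_last_nid(struct toy_page *page, int nid)
{
	return __atomic_exchange_n(&page->_last_nid, nid, __ATOMIC_RELAXED);
}

/* Two-stage filter: migrate only if the previous fault already came from
 * the node that is now asking for the page. */
static int should_migrate(struct toy_page *page, int faulting_nid)
{
	return toy_page_xchg_last_nid(page, faulting_nid) == faulting_nid;
}

int main(void)
{
	struct toy_page page = { ._last_nid = -1 };

	/* First fault from node 1: remembered, but no migration yet. */
	printf("fault from node 1 -> migrate? %d\n", should_migrate(&page, 1));
	/* Second fault from node 1: filter passes, page would migrate. */
	printf("fault from node 1 -> migrate? %d\n", should_migrate(&page, 1));
	/* Fault from node 0: filter resets to the new node, no migration. */
	printf("fault from node 0 -> migrate? %d\n", should_migrate(&page, 0));
	return 0;
}

Run as-is this prints 0, 1, 0: the first fault only records the node, the second fault from the same node passes the filter, and a fault from another node resets it.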
-rw-r--r--  include/linux/mm.h        | 30 ++++++++++++++++++++++++++++++
-rw-r--r--  include/linux/mm_types.h  |  4 ++++
-rw-r--r--  mm/page_alloc.c           |  2 ++
3 files changed, 36 insertions(+), 0 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d04c2f0aab36..d87f9ec4a145 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -693,6 +693,36 @@ static inline int page_to_nid(const struct page *page)
 }
 #endif
 
+#ifdef CONFIG_NUMA_BALANCING
+static inline int page_xchg_last_nid(struct page *page, int nid)
+{
+	return xchg(&page->_last_nid, nid);
+}
+
+static inline int page_last_nid(struct page *page)
+{
+	return page->_last_nid;
+}
+static inline void reset_page_last_nid(struct page *page)
+{
+	page->_last_nid = -1;
+}
+#else
+static inline int page_xchg_last_nid(struct page *page, int nid)
+{
+	return page_to_nid(page);
+}
+
+static inline int page_last_nid(struct page *page)
+{
+	return page_to_nid(page);
+}
+
+static inline void reset_page_last_nid(struct page *page)
+{
+}
+#endif
+
 static inline struct zone *page_zone(const struct page *page)
 {
 	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index d1e246c5e50c..c5fffa239861 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -175,6 +175,10 @@ struct page {
 	 */
 	void *shadow;
 #endif
+
+#ifdef CONFIG_NUMA_BALANCING
+	int _last_nid;
+#endif
 }
 /*
  * The struct page can be forced to be double word aligned so that atomic ops
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ef025e20dbee..73f226a1206e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -608,6 +608,7 @@ static inline int free_pages_check(struct page *page)
 		bad_page(page);
 		return 1;
 	}
+	reset_page_last_nid(page);
 	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
 		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
 	return 0;
@@ -3826,6 +3827,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		mminit_verify_page_links(page, zone, nid, pfn);
 		init_page_count(page);
 		reset_page_mapcount(page);
+		reset_page_last_nid(page);
 		SetPageReserved(page);
 		/*
 		 * Mark the block movable so that blocks are reserved for
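
One design point worth noting: when CONFIG_NUMA_BALANCING is disabled, page_last_nid() and page_xchg_last_nid() fall back to page_to_nid() and reset_page_last_nid() becomes a no-op, so callers such as free_pages_check() and memmap_init_zone() compile unchanged in either configuration. The following is a hedged userspace model of that fallback, not kernel code; all toy_* names and the TOY_NUMA_BALANCING switch are illustrative.

#include <stdio.h>

#define TOY_NUMA_BALANCING 1   /* flip to 0 to model !CONFIG_NUMA_BALANCING */

struct toy_page {
	int nid;            /* stand-in for the node encoded in page->flags */
#if TOY_NUMA_BALANCING
	int _last_nid;      /* stand-in for the field added by this patch */
#endif
};

/* Stand-in for page_to_nid(): report the node the page resides on. */
static int toy_page_to_nid(const struct toy_page *page)
{
	return page->nid;
}

#if TOY_NUMA_BALANCING
static int toy_page_last_nid(struct toy_page *page)
{
	return page->_last_nid;
}
static void toy_reset_page_last_nid(struct toy_page *page)
{
	page->_last_nid = -1;
}
#else
/* Fallback: without NUMA balancing the "last" node is just the current
 * node, and reset is a no-op, so callers need no #ifdefs of their own. */
static int toy_page_last_nid(struct toy_page *page)
{
	return toy_page_to_nid(page);
}
static void toy_reset_page_last_nid(struct toy_page *page)
{
	(void)page;
}
#endif

int main(void)
{
	struct toy_page page = { .nid = 2 };

	toy_reset_page_last_nid(&page);
	/* Prints -1 with the toggle on, 2 (the current node) with it off. */
	printf("last_nid after reset: %d\n", toy_page_last_nid(&page));
	return 0;
}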