author     Mel Gorman <mel@csn.ul.ie>                            2007-10-16 01:25:52 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-16 09:43:00 -0700
commit     e12ba74d8ff3e2f73a583500d7095e406df4d093
tree       a0d3385b65f0b3e1e00b0bbf11b75e7538a93edb /mm
parent     c361be55b3128474aa66d31092db330b07539103
Group short-lived and reclaimable kernel allocations
This patch marks a number of allocations that are either short-lived, such as
network buffers, or reclaimable, such as inode allocations. When something
like updatedb is called, long-lived and unmovable kernel allocations tend to
be spread throughout the address space, which increases fragmentation.

This patch groups these allocations together as much as possible by adding a
new migrate type. The MIGRATE_RECLAIMABLE type is for allocations that can be
reclaimed on demand, but not moved; that is, they can be migrated only by
deleting them and re-reading the information from elsewhere.
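As an illustration of the grouping mechanism, here is a minimal userspace
sketch of the flag-to-migrate-type mapping the patch introduces in
mm/page_alloc.c. The SKETCH_GFP_* bit values are stand-ins invented for this
example (the kernel's real __GFP_* bits differ), and assert() stands in for
the kernel's WARN_ON():

#include <assert.h>
#include <stdio.h>

/* Hypothetical flag bits for this sketch only. */
#define SKETCH_GFP_RECLAIMABLE  0x1u
#define SKETCH_GFP_MOVABLE      0x2u
#define SKETCH_GFP_MOVABLE_MASK (SKETCH_GFP_RECLAIMABLE | SKETCH_GFP_MOVABLE)

enum migratetype {
	MIGRATE_UNMOVABLE,   /* 0: neither mobility flag set */
	MIGRATE_RECLAIMABLE, /* 1: reclaimable on demand, not movable */
	MIGRATE_MOVABLE,     /* 2: freely movable (e.g. user pages) */
};

/* Pack the two mobility flags into a small index, movable in bit 1 and
 * reclaimable in bit 0, the same shape as the patched
 * gfpflags_to_migratetype(). */
static int gfpflags_to_migratetype(unsigned int gfp_flags)
{
	/* Setting both flags at once is a caller bug; the kernel WARN_ONs. */
	assert((gfp_flags & SKETCH_GFP_MOVABLE_MASK) != SKETCH_GFP_MOVABLE_MASK);

	return (((gfp_flags & SKETCH_GFP_MOVABLE) != 0) << 1) |
	       ((gfp_flags & SKETCH_GFP_RECLAIMABLE) != 0);
}

int main(void)
{
	printf("%d %d %d\n",
	       gfpflags_to_migratetype(0),                      /* 0 */
	       gfpflags_to_migratetype(SKETCH_GFP_RECLAIMABLE), /* 1 */
	       gfpflags_to_migratetype(SKETCH_GFP_MOVABLE));    /* 2 */
	return 0;
}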
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c | 10 +++++++---
-rw-r--r--  mm/shmem.c      |  4 ++--
-rw-r--r--  mm/slab.c       |  2 ++
-rw-r--r--  mm/slub.c       |  3 +++
4 files changed, 14 insertions(+), 5 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d575a3ee8dd8..29f4de1423c9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -172,7 +172,10 @@ static void set_pageblock_migratetype(struct page *page, int migratetype)
 
 static inline int gfpflags_to_migratetype(gfp_t gfp_flags)
 {
-	return ((gfp_flags & __GFP_MOVABLE) != 0);
+	WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
+
+	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
+		((gfp_flags & __GFP_RECLAIMABLE) != 0);
 }
 
 #else
@@ -676,8 +679,9 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
  * the free lists for the desirable migrate type are depleted
  */
 static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
-	[MIGRATE_UNMOVABLE]   = { MIGRATE_MOVABLE   },
-	[MIGRATE_MOVABLE]     = { MIGRATE_UNMOVABLE },
+	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE   },
+	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE   },
+	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
 };
 
 /*
diff --git a/mm/shmem.c b/mm/shmem.c
index 855b93b3637c..76ecbac0d55b 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -95,9 +95,9 @@ static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
 	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
 	 * might be reconsidered if it ever diverges from PAGE_SIZE.
 	 *
-	 * __GFP_MOVABLE is masked out as swap vectors cannot move
+	 * Mobility flags are masked out as swap vectors cannot move
 	 */
-	return alloc_pages((gfp_mask & ~__GFP_MOVABLE) | __GFP_ZERO,
+	return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
 				PAGE_CACHE_SHIFT-PAGE_SHIFT);
 }
 
diff --git a/mm/slab.c b/mm/slab.c
index 8fb56ae685de..e34bcb87a6ee 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1643,6 +1643,8 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 #endif
 
 	flags |= cachep->gfpflags;
+	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
+		flags |= __GFP_RECLAIMABLE;
 
 	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
 	if (!page)
diff --git a/mm/slub.c b/mm/slub.c
index 19d3202ca2dc..a90c4ffc9576 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1055,6 +1055,9 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if (s->flags & SLAB_CACHE_DMA)
 		flags |= SLUB_DMA;
 
+	if (s->flags & SLAB_RECLAIM_ACCOUNT)
+		flags |= __GFP_RECLAIMABLE;
+
 	if (node == -1)
 		page = alloc_pages(flags, s->order);
 	else
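For context on how a table like the new fallbacks[] array is consumed, below
is a minimal userspace sketch of a table-driven fallback walk: when the free
list for the requested migrate type is empty, the other types are tried in
the listed order. This is not the kernel's actual fallback code (which also
chooses block sizes and can steal whole pageblocks); free_list_has_pages()
and pick_migratetype() are hypothetical names for this sketch.

#include <stdbool.h>
#include <stdio.h>

enum {
	MIGRATE_UNMOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_MOVABLE,
	MIGRATE_TYPES,
};

/* Same table as the patch adds to mm/page_alloc.c. */
static const int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES - 1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
};

/* Hypothetical stand-in for "does the free list for this migrate type
 * still hold pages?". */
static bool free_list_has_pages(int migratetype)
{
	return migratetype == MIGRATE_MOVABLE; /* pretend only movable has any */
}

/* Pick a migrate type to allocate from: the requested one if possible,
 * otherwise the fallbacks in table order. */
static int pick_migratetype(int requested)
{
	int i;

	if (free_list_has_pages(requested))
		return requested;
	for (i = 0; i < MIGRATE_TYPES - 1; i++)
		if (free_list_has_pages(fallbacks[requested][i]))
			return fallbacks[requested][i];
	return -1; /* every free list is empty */
}

int main(void)
{
	/* A reclaimable request falls through to the movable list here. */
	printf("%d\n", pick_migratetype(MIGRATE_RECLAIMABLE)); /* prints 2 */
	return 0;
}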