path: root/include/linux/slab_def.h
author	Christoph Lameter <clameter@sgi.com>	2007-02-10 01:43:10 -0800
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-02-11 10:51:18 -0800
commit	4b51d66989218aad731a721b5b28c79bf5388c09 (patch)
tree	8ff7acbd219f699c20c2f1fd201ffb3db5a64062 /include/linux/slab_def.h
parent	66701b1499a3ff11882c8c4aef36e8eac86e17b1 (diff)
download	linux-4b51d66989218aad731a721b5b28c79bf5388c09.tar.gz
	linux-4b51d66989218aad731a721b5b28c79bf5388c09.tar.bz2
	linux-4b51d66989218aad731a721b5b28c79bf5388c09.zip
[PATCH] optional ZONE_DMA: optional ZONE_DMA in the VM
Make ZONE_DMA optional in core code.

- ifdef all code for ZONE_DMA and related definitions following the
  example for ZONE_DMA32 and ZONE_HIGHMEM.
- Without ZONE_DMA, ZONE_HIGHMEM and ZONE_DMA32 we get to a ZONES_SHIFT
  of 0.
- Modify the VM statistics to work correctly without a DMA zone.
- Modify slab to not create DMA slabs if there is no ZONE_DMA.

[akpm@osdl.org: cleanup]
[jdike@addtoit.com: build fix]
[apw@shadowen.org: Simplify calculation of the number of bits we need for ZONES_SHIFT]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Andi Kleen <ak@suse.de>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Matthew Wilcox <willy@debian.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
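For reference, the ZONES_SHIFT change mentioned above amounts to deriving the width of the zone index from MAX_NR_ZONES at build time. A minimal sketch of that pattern follows; the thresholds are illustrative of the approach rather than copied from the in-tree mmzone.h:

/*
 * Sketch: number of bits needed to encode a zone index.
 * With ZONE_DMA, ZONE_DMA32 and ZONE_HIGHMEM all configured out,
 * MAX_NR_ZONES can fall to 1 and ZONES_SHIFT becomes 0.
 */
#if MAX_NR_ZONES < 2
#define ZONES_SHIFT 0
#elif MAX_NR_ZONES <= 2
#define ZONES_SHIFT 1
#elif MAX_NR_ZONES <= 4
#define ZONES_SHIFT 2
#else
#error ZONES_SHIFT -- too many zones configured
#endif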
Diffstat (limited to 'include/linux/slab_def.h')
-rw-r--r--	include/linux/slab_def.h	30
1 file changed, 21 insertions(+), 9 deletions(-)
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 4b463e66ddea..5e4364644ed1 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -19,7 +19,9 @@
 struct cache_sizes {
 	size_t cs_size;
 	struct kmem_cache *cs_cachep;
+#ifdef CONFIG_ZONE_DMA
 	struct kmem_cache *cs_dmacachep;
+#endif
 };
 
 extern struct cache_sizes malloc_sizes[];
@@ -39,9 +41,12 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 			__you_cannot_kmalloc_that_much();
 		}
 found:
-		return kmem_cache_alloc((flags & GFP_DMA) ?
-			malloc_sizes[i].cs_dmacachep :
-			malloc_sizes[i].cs_cachep, flags);
+#ifdef CONFIG_ZONE_DMA
+		if (flags & GFP_DMA)
+			return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
+						flags);
+#endif
+		return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags);
 	}
 	return __kmalloc(size, flags);
 }
@@ -62,9 +67,12 @@ static inline void *kzalloc(size_t size, gfp_t flags)
 			__you_cannot_kzalloc_that_much();
 		}
 found:
-		return kmem_cache_zalloc((flags & GFP_DMA) ?
-			malloc_sizes[i].cs_dmacachep :
-			malloc_sizes[i].cs_cachep, flags);
+#ifdef CONFIG_ZONE_DMA
+		if (flags & GFP_DMA)
+			return kmem_cache_zalloc(malloc_sizes[i].cs_dmacachep,
+						flags);
+#endif
+		return kmem_cache_zalloc(malloc_sizes[i].cs_cachep, flags);
 	}
 	return __kzalloc(size, flags);
 }
@@ -88,9 +96,13 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 			__you_cannot_kmalloc_that_much();
 		}
 found:
-		return kmem_cache_alloc_node((flags & GFP_DMA) ?
-			malloc_sizes[i].cs_dmacachep :
-			malloc_sizes[i].cs_cachep, flags, node);
+#ifdef CONFIG_ZONE_DMA
+		if (flags & GFP_DMA)
+			return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
+						flags, node);
+#endif
+		return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep,
+						flags, node);
 	}
 	return __kmalloc_node(size, flags, node);
 }
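
A hedged usage sketch of the resulting fast path follows; the caller below is illustrative and not part of this commit. With CONFIG_ZONE_DMA=y, a constant-size GFP_DMA allocation is steered to the DMA kmalloc cache; with CONFIG_ZONE_DMA=n, the ifdef'd branch is compiled out and the same call is served from the regular cache.

/* Illustrative caller, not from this commit. */
#include <linux/slab.h>

static void *dma_buf_example(void)
{
	/*
	 * The size is a compile-time constant, so the inline kmalloc()
	 * above is used. CONFIG_ZONE_DMA=y: the object comes from
	 * malloc_sizes[i].cs_dmacachep. CONFIG_ZONE_DMA=n: the GFP_DMA
	 * test vanishes and malloc_sizes[i].cs_cachep is used instead.
	 */
	return kmalloc(64, GFP_KERNEL | GFP_DMA);
}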