author    Linus Torvalds <torvalds@linux-foundation.org>  2013-09-15 07:15:06 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-09-15 07:15:06 -0400
commit    bff157b3ad4b9f6be0af6987fcd62deaf0f2b799 (patch)
tree      02ae68620a40fefd9ffc2de739a8bb362baa3f08 /include
parent    8bf5e36d0429e9b8fc2c84966577f10386bd7195 (diff)
parent    23774a2f6fee0848503bfb8004eeeb5adef94f5c (diff)
Merge branch 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull SLAB update from Pekka Enberg:
 "Nothing terribly exciting here apart from Christoph's kmalloc
  unification patches that bring sl[aou]b implementations closer to
  each other"

* 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
  slab: Use correct GFP_DMA constant
  slub: remove verify_mem_not_deleted()
  mm/sl[aou]b: Move kmallocXXX functions to common code
  mm, slab_common: add 'unlikely' to size check of kmalloc_slab()
  mm/slub.c: beautify code for removing redundancy 'break' statement.
  slub: Remove unnecessary page NULL check
  slub: don't use cpu partial pages on UP
  mm/slub: beautify code for 80 column limitation and tab alignment
  mm/slub: remove 'per_cpu' which is useless variable
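The unification means the kmalloc()/kmalloc_node() family is now defined
once in <linux/slab.h> instead of per-allocator. A minimal caller-side
sketch (my illustration, not part of this merge) of the API that all
three allocators now share:

    #include <linux/slab.h>

    struct foo {
    	int a, b;
    };

    static int alloc_example(void)
    {
    	/* Compile-time-constant size: the common inline resolves this
    	 * to a kmalloc_caches[] slot (SLAB/SLUB) or to __kmalloc()
    	 * (SLOB) -- same source either way. */
    	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

    	if (!f)
    		return -ENOMEM;

    	kfree(f);
    	return 0;
    }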
Diffstat (limited to 'include')
-rw-r--r--  include/linux/slab.h      156
-rw-r--r--  include/linux/slab_def.h  106
-rw-r--r--  include/linux/slob_def.h   31
-rw-r--r--  include/linux/slub_def.h  110
4 files changed, 124 insertions(+), 279 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 6c5cc0ea8713..74f105847d13 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -4,6 +4,8 @@
* (C) SGI 2006, Christoph Lameter
* Cleaned up and restructured to ease the addition of alternative
* implementations of SLAB allocators.
+ * (C) Linux Foundation 2008-2013
+ * Unified interface for all slab allocators
*/
#ifndef _LINUX_SLAB_H
@@ -94,6 +96,7 @@
#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
(unsigned long)ZERO_SIZE_PTR)
+#include <linux/kmemleak.h>
struct mem_cgroup;
/*
@@ -289,6 +292,57 @@ static __always_inline int kmalloc_index(size_t size)
}
#endif /* !CONFIG_SLOB */
+void *__kmalloc(size_t size, gfp_t flags);
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
+
+#ifdef CONFIG_NUMA
+void *__kmalloc_node(size_t size, gfp_t flags, int node);
+void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+#else
+static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
+{
+ return __kmalloc(size, flags);
+}
+
+static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
+{
+ return kmem_cache_alloc(s, flags);
+}
+#endif
+
+#ifdef CONFIG_TRACING
+extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
+
+#ifdef CONFIG_NUMA
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
+ gfp_t gfpflags,
+ int node, size_t size);
+#else
+static __always_inline void *
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
+ gfp_t gfpflags,
+ int node, size_t size)
+{
+ return kmem_cache_alloc_trace(s, gfpflags, size);
+}
+#endif /* CONFIG_NUMA */
+
+#else /* CONFIG_TRACING */
+static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
+ gfp_t flags, size_t size)
+{
+ return kmem_cache_alloc(s, flags);
+}
+
+static __always_inline void *
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
+ gfp_t gfpflags,
+ int node, size_t size)
+{
+ return kmem_cache_alloc_node(s, gfpflags, node);
+}
+#endif /* CONFIG_TRACING */
+
#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif
@@ -297,9 +351,60 @@ static __always_inline int kmalloc_index(size_t size)
#include <linux/slub_def.h>
#endif
-#ifdef CONFIG_SLOB
-#include <linux/slob_def.h>
+static __always_inline void *
+kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+{
+ void *ret;
+
+ flags |= (__GFP_COMP | __GFP_KMEMCG);
+ ret = (void *) __get_free_pages(flags, order);
+ kmemleak_alloc(ret, size, 1, flags);
+ return ret;
+}
+
+#ifdef CONFIG_TRACING
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
+#else
+static __always_inline void *
+kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+ return kmalloc_order(size, flags, order);
+}
+#endif
+
+static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
+{
+ unsigned int order = get_order(size);
+ return kmalloc_order_trace(size, flags, order);
+}
+
+/**
+ * kmalloc - allocate memory
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate (see kcalloc).
+ *
+ * kmalloc is the normal method of allocating memory
+ * for objects smaller than page size in the kernel.
+ */
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
+{
+ if (__builtin_constant_p(size)) {
+ if (size > KMALLOC_MAX_CACHE_SIZE)
+ return kmalloc_large(size, flags);
+#ifndef CONFIG_SLOB
+ if (!(flags & GFP_DMA)) {
+ int index = kmalloc_index(size);
+
+ if (!index)
+ return ZERO_SIZE_PTR;
+
+ return kmem_cache_alloc_trace(kmalloc_caches[index],
+ flags, size);
+ }
#endif
+ }
+ return __kmalloc(size, flags);
+}
/*
* Determine size used for the nth kmalloc cache.
@@ -321,6 +426,23 @@ static __always_inline int kmalloc_size(int n)
return 0;
}
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+#ifndef CONFIG_SLOB
+ if (__builtin_constant_p(size) &&
+ size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
+ int i = kmalloc_index(size);
+
+ if (!i)
+ return ZERO_SIZE_PTR;
+
+ return kmem_cache_alloc_node_trace(kmalloc_caches[i],
+ flags, node, size);
+ }
+#endif
+ return __kmalloc_node(size, flags, node);
+}
+
/*
* Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
* Intended for arches that get misalignment faults even for 64 bit integer
@@ -451,36 +573,6 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
return kmalloc_array(n, size, flags | __GFP_ZERO);
}
-#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
-/**
- * kmalloc_node - allocate memory from a specific node
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kmalloc).
- * @node: node to allocate from.
- *
- * kmalloc() for non-local nodes, used to allocate from a specific node
- * if available. Equivalent to kmalloc() in the non-NUMA single-node
- * case.
- */
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
- return kmalloc(size, flags);
-}
-
-static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
- return __kmalloc(size, flags);
-}
-
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-
-static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
- gfp_t flags, int node)
-{
- return kmem_cache_alloc(cachep, flags);
-}
-#endif /* !CONFIG_NUMA && !CONFIG_SLOB */
-
/*
* kmalloc_track_caller is a special version of kmalloc that records the
* calling function of the routine calling it for slab leak tracking instead
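The hunks above replace the per-allocator kmalloc() inlines with a single
common definition. A sketch of the three dispatch paths it takes for a
given size (my annotation, not part of the patch; the
KMALLOC_MAX_CACHE_SIZE comparison assumes SLUB, where it is two pages):

    #include <linux/slab.h>

    static void dispatch_sketch(size_t len)
    {
    	void *a, *b, *c;

    	/* Small constant size, no GFP_DMA: compile-time lookup of
    	 * kmalloc_caches[kmalloc_index(64)], allocated through
    	 * kmem_cache_alloc_trace(). */
    	a = kmalloc(64, GFP_KERNEL);

    	/* Constant size above KMALLOC_MAX_CACHE_SIZE: routed to
    	 * kmalloc_large() and on to kmalloc_order(). */
    	b = kmalloc(16 * PAGE_SIZE, GFP_KERNEL);

    	/* Size known only at run time: falls through to __kmalloc(). */
    	c = kmalloc(len, GFP_KERNEL);

    	kfree(c);
    	kfree(b);
    	kfree(a);
    }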
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index cd401580bdd3..e9346b4f1ef4 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -3,20 +3,6 @@
/*
* Definitions unique to the original Linux SLAB allocator.
- *
- * What we provide here is a way to optimize the frequent kmalloc
- * calls in the kernel by selecting the appropriate general cache
- * if kmalloc was called with a size that can be established at
- * compile time.
- */
-
-#include <linux/init.h>
-#include <linux/compiler.h>
-
-/*
- * struct kmem_cache
- *
- * manages a cache.
*/
struct kmem_cache {
@@ -102,96 +88,4 @@ struct kmem_cache {
*/
};
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-void *__kmalloc(size_t size, gfp_t flags);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
-#else
-static __always_inline void *
-kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
-{
- return kmem_cache_alloc(cachep, flags);
-}
-#endif
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
- struct kmem_cache *cachep;
- void *ret;
-
- if (__builtin_constant_p(size)) {
- int i;
-
- if (!size)
- return ZERO_SIZE_PTR;
-
- if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
- return NULL;
-
- i = kmalloc_index(size);
-
-#ifdef CONFIG_ZONE_DMA
- if (flags & GFP_DMA)
- cachep = kmalloc_dma_caches[i];
- else
-#endif
- cachep = kmalloc_caches[i];
-
- ret = kmem_cache_alloc_trace(cachep, flags, size);
-
- return ret;
- }
- return __kmalloc(size, flags);
-}
-
-#ifdef CONFIG_NUMA
-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
-extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
- gfp_t flags,
- int nodeid,
- size_t size);
-#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
- gfp_t flags,
- int nodeid,
- size_t size)
-{
- return kmem_cache_alloc_node(cachep, flags, nodeid);
-}
-#endif
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
- struct kmem_cache *cachep;
-
- if (__builtin_constant_p(size)) {
- int i;
-
- if (!size)
- return ZERO_SIZE_PTR;
-
- if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
- return NULL;
-
- i = kmalloc_index(size);
-
-#ifdef CONFIG_ZONE_DMA
- if (flags & GFP_DMA)
- cachep = kmalloc_dma_caches[i];
- else
-#endif
- cachep = kmalloc_caches[i];
-
- return kmem_cache_alloc_node_trace(cachep, flags, node, size);
- }
- return __kmalloc_node(size, flags, node);
-}
-
-#endif /* CONFIG_NUMA */
-
#endif /* _LINUX_SLAB_DEF_H */
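One behavior change worth noting in the removal above (my reading of the
diff, not stated in the changelog): the old SLAB inline selected
kmalloc_dma_caches[] at compile time for GFP_DMA requests, while the new
common kmalloc() skips the cache lookup for GFP_DMA and lets __kmalloc()
pick the DMA cache at run time. A sketch:

    #include <linux/slab.h>

    static void *dma_sketch(void)
    {
    	/* Constant size, but GFP_DMA set: with the common inline this
    	 * no longer resolves to a DMA cache here -- it takes the
    	 * __kmalloc() path, which chooses the cache at run time. */
    	return kmalloc(512, GFP_KERNEL | GFP_DMA);
    }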
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
deleted file mode 100644
index 095a5a4a8516..000000000000
--- a/include/linux/slob_def.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef __LINUX_SLOB_DEF_H
-#define __LINUX_SLOB_DEF_H
-
-#include <linux/numa.h>
-
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
- gfp_t flags)
-{
- return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
-}
-
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
- return __kmalloc_node(size, flags, node);
-}
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
- return __kmalloc_node(size, flags, NUMA_NO_NODE);
-}
-
-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
-{
- return kmalloc(size, flags);
-}
-
-#endif /* __LINUX_SLOB_DEF_H */
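With slob_def.h gone, SLOB builds use the common inlines too. Since
kmalloc_caches[] does not exist under CONFIG_SLOB, the common kmalloc()
effectively reduces to the following for SLOB (a sketch of the
preprocessed result, derived from the slab.h hunk above):

    /* What the common kmalloc() collapses to when CONFIG_SLOB is set:
     * only the large-allocation shortcut survives; everything else
     * goes straight to __kmalloc(). */
    static __always_inline void *kmalloc_slob_sketch(size_t size, gfp_t flags)
    {
    	if (__builtin_constant_p(size) && size > KMALLOC_MAX_CACHE_SIZE)
    		return kmalloc_large(size, flags);
    	return __kmalloc(size, flags);
    }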
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 027276fa8713..cc0b67eada42 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -6,14 +6,8 @@
*
* (C) 2007 SGI, Christoph Lameter
*/
-#include <linux/types.h>
-#include <linux/gfp.h>
-#include <linux/bug.h>
-#include <linux/workqueue.h>
#include <linux/kobject.h>
-#include <linux/kmemleak.h>
-
enum stat_item {
ALLOC_FASTPATH, /* Allocation from cpu slab */
ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */
@@ -104,108 +98,4 @@ struct kmem_cache {
struct kmem_cache_node *node[MAX_NUMNODES];
};
-void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
-void *__kmalloc(size_t size, gfp_t flags);
-
-static __always_inline void *
-kmalloc_order(size_t size, gfp_t flags, unsigned int order)
-{
- void *ret;
-
- flags |= (__GFP_COMP | __GFP_KMEMCG);
- ret = (void *) __get_free_pages(flags, order);
- kmemleak_alloc(ret, size, 1, flags);
- return ret;
-}
-
-/**
- * Calling this on allocated memory will check that the memory
- * is expected to be in use, and print warnings if not.
- */
-#ifdef CONFIG_SLUB_DEBUG
-extern bool verify_mem_not_deleted(const void *x);
-#else
-static inline bool verify_mem_not_deleted(const void *x)
-{
- return true;
-}
-#endif
-
-#ifdef CONFIG_TRACING
-extern void *
-kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
-#else
-static __always_inline void *
-kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
-{
- return kmem_cache_alloc(s, gfpflags);
-}
-
-static __always_inline void *
-kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
-{
- return kmalloc_order(size, flags, order);
-}
-#endif
-
-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
-{
- unsigned int order = get_order(size);
- return kmalloc_order_trace(size, flags, order);
-}
-
-static __always_inline void *kmalloc(size_t size, gfp_t flags)
-{
- if (__builtin_constant_p(size)) {
- if (size > KMALLOC_MAX_CACHE_SIZE)
- return kmalloc_large(size, flags);
-
- if (!(flags & GFP_DMA)) {
- int index = kmalloc_index(size);
-
- if (!index)
- return ZERO_SIZE_PTR;
-
- return kmem_cache_alloc_trace(kmalloc_caches[index],
- flags, size);
- }
- }
- return __kmalloc(size, flags);
-}
-
-#ifdef CONFIG_NUMA
-void *__kmalloc_node(size_t size, gfp_t flags, int node);
-void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
-
-#ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size);
-#else
-static __always_inline void *
-kmem_cache_alloc_node_trace(struct kmem_cache *s,
- gfp_t gfpflags,
- int node, size_t size)
-{
- return kmem_cache_alloc_node(s, gfpflags, node);
-}
-#endif
-
-static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
-{
- if (__builtin_constant_p(size) &&
- size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
- int index = kmalloc_index(size);
-
- if (!index)
- return ZERO_SIZE_PTR;
-
- return kmem_cache_alloc_node_trace(kmalloc_caches[index],
- flags, node, size);
- }
- return __kmalloc_node(size, flags, node);
-}
-#endif
-
#endif /* _LINUX_SLUB_DEF_H */
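The NUMA entry points removed here now live in <linux/slab.h> as well,
so node-aware callers are unaffected. A usage sketch (my illustration;
the node id would typically come from something like cpu_to_node()):

    #include <linux/slab.h>

    /* Allocate on a specific NUMA node via the now-common inline. */
    static void *alloc_on_node(size_t size, int node)
    {
    	return kmalloc_node(size, GFP_KERNEL, node);
    }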