author		Linus Torvalds <torvalds@linux-foundation.org>	2014-04-13 13:28:13 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-13 13:28:13 -0700
commit		bf3a340738bc78008e496257c04fb5a7fc8281e6 (patch)
tree		3e84d21261ff0c437f0ea2507df8c30844150769 /include
parent		321d03c86732e45f5f33ad0db5b68e2e1364acb9 (diff)
parent		34bf6ef94a835a8f1d8abd3e7d38c6c08d205867 (diff)
Merge branch 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux
Pull slab changes from Pekka Enberg:
 "The biggest change is byte-sized freelist indices which reduces slab
  freelist memory usage:

    https://lkml.org/lkml/2013/12/2/64"

* 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
  mm: slab/slub: use page->list consistently instead of page->lru
  mm/slab.c: cleanup outdated comments and unify variables naming
  slab: fix wrongly used macro
  slub: fix high order page allocation problem with __GFP_NOFAIL
  slab: Make allocations with GFP_ZERO slightly more efficient
  slab: make more slab management structure off the slab
  slab: introduce byte sized index for the freelist of a slab
  slab: restrict the number of objects in a slab
  slab: introduce helper functions to get/set free object
  slab: factor out calculate nr objects in cache_estimate
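As a rough illustration of the freelist-memory saving the pull message refers to, here is a minimal userspace sketch, not kernel code: the 4 KiB page size, the one-index-per-object layout, and the freelist_bytes() helper are assumptions made only for the example. With objects of at least 16 bytes, a slab page holds at most 256 objects, so each freelist entry can shrink from an unsigned int to a single byte.

/*
 * Minimal sketch, not kernel code: the page size, per-slab layout and
 * helper below are assumptions used only to show why byte-sized
 * freelist indices shrink per-slab management overhead.
 */
#include <stdio.h>

#define PAGE_SIZE 4096u			/* assumed 2^12-byte slab page */

/* Bytes of freelist needed: one index of idx_size bytes per object. */
static unsigned int freelist_bytes(unsigned int obj_size, unsigned int idx_size)
{
	unsigned int nr_objs = PAGE_SIZE / obj_size;

	return nr_objs * idx_size;
}

int main(void)
{
	unsigned int sizes[] = { 16, 32, 64, 128, 256 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("obj %3u B: %4u B freelist with int indices, %3u B with byte indices\n",
		       sizes[i],
		       freelist_bytes(sizes[i], sizeof(unsigned int)),
		       freelist_bytes(sizes[i], sizeof(unsigned char)));
	return 0;
}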
Diffstat (limited to 'include')
-rw-r--r--	include/linux/mm_types.h	 3
-rw-r--r--	include/linux/slab.h		11
2 files changed, 13 insertions(+), 1 deletion(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 2b58d192ea24..8967e20cbe57 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -124,6 +124,8 @@ struct page {
 	union {
 		struct list_head lru;	/* Pageout list, eg. active_list
 					 * protected by zone->lru_lock !
+					 * Can be used as a generic list
+					 * by the page owner.
 					 */
 		struct {		/* slub per cpu partial pages */
 			struct page *next;	/* Next partial slab */
@@ -136,7 +138,6 @@ struct page {
 #endif
 		};
 
-		struct list_head list;	/* slobs list of pages */
 		struct slab *slab_page; /* slab fields */
 		struct rcu_head rcu_head;	/* Used by SLAB
 						 * when destroying via RCU
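The mm_types.h hunk drops the SLOB-only page->list field and instead documents that page->lru may double as a generic list node for whatever currently owns the page. Below is a minimal userspace sketch of that pattern; the struct names and the tiny list implementation mimic the kernel's but are local, assumed definitions written only for illustration.

#include <stdio.h>
#include <stddef.h>

/* Toy doubly linked list, standing in for the kernel's list_head. */
struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *head)
{
	head->prev = head->next = head;
}

static void list_add(struct list_head *node, struct list_head *head)
{
	node->next = head->next;
	node->prev = head;
	head->next->prev = node;
	head->next = node;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct page {				/* toy stand-in for struct page */
	struct list_head lru;		/* reused as the owner's list node */
	int id;
};

int main(void)
{
	struct page pages[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
	struct list_head partial;	/* e.g. an allocator's list of partial slabs */
	struct list_head *pos;

	list_init(&partial);
	for (int i = 0; i < 3; i++)
		list_add(&pages[i].lru, &partial);

	for (pos = partial.next; pos != &partial; pos = pos->next)
		printf("page %d threaded through its lru field\n",
		       container_of(pos, struct page, lru)->id);
	return 0;
}

The point the new comment records is that while an owner such as a slab allocator holds a page, that page is not sitting on an LRU list, so a single embedded list node can safely serve both roles.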
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 3dd389aa91c7..307bfbe62387 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -242,6 +242,17 @@ struct kmem_cache {
 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
 #endif
 
+/*
+ * This restriction comes from the byte-sized index implementation.
+ * The page size is normally 2^12 bytes, so if we want a byte-sized
+ * index that can represent 2^8 entries, the object size must be
+ * greater than or equal to 2^12 / 2^8 = 2^4 = 16.
+ * If the minimum kmalloc size is less than 16, we use it as the
+ * minimum object size and give up on the byte-sized index.
+ */
+#define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \
+				(KMALLOC_MIN_SIZE) : 16)
+
 #ifndef CONFIG_SLOB
 extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
 #ifdef CONFIG_ZONE_DMA
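To make the arithmetic in the new slab.h comment concrete, the compile-time sketch below re-derives it. PAGE_SIZE and KMALLOC_MIN_SIZE here are assumed example values rather than the kernel's definitions; only the SLAB_OBJ_MIN_SIZE macro is taken from the hunk above.

/* Assumed example values, not the kernel's definitions. */
#define PAGE_SIZE	 4096u	/* 2^12-byte page */
#define KMALLOC_MIN_SIZE 8u	/* varies per architecture */

/* From the hunk above. */
#define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

/*
 * With objects of at least 2^4 = 16 bytes, a 2^12-byte page holds at
 * most 2^8 = 256 objects, so every object index fits in one byte.
 */
_Static_assert(PAGE_SIZE / 16 <= 256,
	       "a byte-sized index reaches every object in a page");

/*
 * With the assumed KMALLOC_MIN_SIZE of 8 (< 16), the macro keeps the
 * smaller minimum object size and the byte-sized index is given up,
 * as the comment in the hunk describes.
 */
_Static_assert(SLAB_OBJ_MIN_SIZE == 8u,
	       "minimum object size falls back to KMALLOC_MIN_SIZE");

int main(void) { return 0; }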