author	David S. Miller <davem@davemloft.net>	2016-03-19 21:05:24 -0400
committer	David S. Miller <davem@davemloft.net>	2016-03-19 21:05:24 -0400
commit	c78a85a8430e35aecd53e190a2f6b999062b1037 (patch)
tree	fbd69827579db437492fb77b5faa53814114bed3	/include/linux/slab.h
parent	133800d1f0288b9ddfc0d0aded10d9efa82d5b8c (diff)
parent	de06dbfa7861c9019eedefc0c356ba86e5098f1b (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux
Diffstat (limited to 'include/linux/slab.h')
-rw-r--r--	include/linux/slab.h	13
1 file changed, 11 insertions, 2 deletions
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 3627d5c1bc47..e4b568738ca3 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -20,7 +20,7 @@
* Flags to pass to kmem_cache_create().
* The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
*/
-#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
+#define SLAB_CONSISTENCY_CHECKS 0x00000100UL /* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
@@ -314,7 +314,7 @@ void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment
void kmem_cache_free(struct kmem_cache *, void *);

/*
- * Bulk allocation and freeing operations. These are accellerated in an
+ * Bulk allocation and freeing operations. These are accelerated in an
* allocator specific way to avoid taking locks repeatedly or building
* metadata structures unnecessarily.
*
@@ -323,6 +323,15 @@ void kmem_cache_free(struct kmem_cache *, void *);
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

+/*
+ * Caller must not use kfree_bulk() on memory not originally allocated
+ * by kmalloc(), because the SLOB allocator cannot handle this.
+ */
+static __always_inline void kfree_bulk(size_t size, void **p)
+{
+ kmem_cache_free_bulk(NULL, size, p);
+}
+
#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment;
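
For context, a minimal usage sketch of the bulk API together with the kfree_bulk() helper added by this patch. bulk_demo(), cachep and NR_OBJS are illustrative names, not part of the kernel or of this commit; the point being exercised is the constraint stated in the new comment, namely that kfree_bulk() may only be handed pointers that came from kmalloc().

/*
 * Illustrative sketch only, not part of the patch: bulk_demo(), cachep and
 * NR_OBJS are made-up names. Cache-backed objects go through
 * kmem_cache_alloc_bulk()/kmem_cache_free_bulk(); kmalloc()-backed buffers
 * may instead be released with the new kfree_bulk() helper.
 */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#define NR_OBJS 16

static int bulk_demo(struct kmem_cache *cachep)
{
	void *objs[NR_OBJS];
	void *bufs[NR_OBJS];
	size_t i;

	/* Grab NR_OBJS objects from the cache in one call; 0 means failure. */
	if (kmem_cache_alloc_bulk(cachep, GFP_KERNEL, NR_OBJS, objs) != NR_OBJS)
		return -ENOMEM;

	/* ... use objs[] ... */

	/* Return them through the matching bulk free path. */
	kmem_cache_free_bulk(cachep, NR_OBJS, objs);

	/* kfree_bulk() is only valid for kmalloc()-backed memory. */
	for (i = 0; i < NR_OBJS; i++) {
		bufs[i] = kmalloc(64, GFP_KERNEL);
		if (!bufs[i]) {
			if (i)
				kfree_bulk(i, bufs);	/* free what we did get */
			return -ENOMEM;
		}
	}

	/* ... use bufs[] ... */

	kfree_bulk(NR_OBJS, bufs);
	return 0;
}

Passing NULL as the cache pointer (as kfree_bulk() does via kmem_cache_free_bulk()) works because the allocator derives the owning cache from each object's page, which is also why memory not obtained from kmalloc() must not be freed this way.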