Diffstat (limited to 'mm/mempool.c')
-rw-r--r--  mm/mempool.c  75
1 file changed, 61 insertions, 14 deletions
diff --git a/mm/mempool.c b/mm/mempool.c
index 4759be0ff9de..dbbf0e9fb424 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -56,6 +56,10 @@ static void __check_element(mempool_t *pool, void *element, size_t size)
static void check_element(mempool_t *pool, void *element)
{
+ /* Skip checking: KASAN might save its metadata in the element. */
+ if (kasan_enabled())
+ return;
+
/* Mempools backed by slab allocator */
if (pool->free == mempool_kfree) {
__check_element(pool, element, (size_t)pool->pool_data);
@@ -64,10 +68,10 @@ static void check_element(mempool_t *pool, void *element)
} else if (pool->free == mempool_free_pages) {
/* Mempools backed by page allocator */
int order = (int)(long)pool->pool_data;
- void *addr = kmap_atomic((struct page *)element);
+ void *addr = kmap_local_page((struct page *)element);
__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
- kunmap_atomic(addr);
+ kunmap_local(addr);
}
}
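
For reference, a minimal sketch of the kmap_local_page() pattern these hunks convert to (fill_page_sketch() is a hypothetical helper, not part of this patch). Unlike kmap_atomic(), a local mapping does not disable preemption or page faults; it only has to be released in reverse order within the same task:

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Hypothetical helper: write a pattern into a page through a
 * short-lived local mapping, the same shape as check_element()
 * and poison_element() above.
 */
static void fill_page_sketch(struct page *page, int pattern)
{
	void *addr = kmap_local_page(page);

	memset(addr, pattern, PAGE_SIZE);
	kunmap_local(addr);
}
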
@@ -81,6 +85,10 @@ static void __poison_element(void *element, size_t size)
static void poison_element(mempool_t *pool, void *element)
{
+ /* Skip poisoning: KASAN might save its metadata in the element. */
+ if (kasan_enabled())
+ return;
+
/* Mempools backed by slab allocator */
if (pool->alloc == mempool_kmalloc) {
__poison_element(element, (size_t)pool->pool_data);
@@ -89,10 +97,10 @@ static void poison_element(mempool_t *pool, void *element)
} else if (pool->alloc == mempool_alloc_pages) {
/* Mempools backed by page allocator */
int order = (int)(long)pool->pool_data;
- void *addr = kmap_atomic((struct page *)element);
+ void *addr = kmap_local_page((struct page *)element);
__poison_element(addr, 1UL << (PAGE_SHIFT + order));
- kunmap_atomic(addr);
+ kunmap_local(addr);
}
}
#else /* CONFIG_SLUB_DEBUG_ON */
@@ -104,32 +112,34 @@ static inline void poison_element(mempool_t *pool, void *element)
}
#endif /* CONFIG_SLUB_DEBUG_ON */
-static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
+static __always_inline bool kasan_poison_element(mempool_t *pool, void *element)
{
if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
- kasan_slab_free_mempool(element);
+ return kasan_mempool_poison_object(element);
else if (pool->alloc == mempool_alloc_pages)
- kasan_poison_pages(element, (unsigned long)pool->pool_data,
- false);
+ return kasan_mempool_poison_pages(element,
+ (unsigned long)pool->pool_data);
+ return true;
}
static void kasan_unpoison_element(mempool_t *pool, void *element)
{
if (pool->alloc == mempool_kmalloc)
- kasan_unpoison_range(element, (size_t)pool->pool_data);
+ kasan_mempool_unpoison_object(element, (size_t)pool->pool_data);
else if (pool->alloc == mempool_alloc_slab)
- kasan_unpoison_range(element, kmem_cache_size(pool->pool_data));
+ kasan_mempool_unpoison_object(element,
+ kmem_cache_size(pool->pool_data));
else if (pool->alloc == mempool_alloc_pages)
- kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
- false);
+ kasan_mempool_unpoison_pages(element,
+ (unsigned long)pool->pool_data);
}
static __always_inline void add_element(mempool_t *pool, void *element)
{
BUG_ON(pool->curr_nr >= pool->min_nr);
poison_element(pool, element);
- kasan_poison_element(pool, element);
- pool->elements[pool->curr_nr++] = element;
+ if (kasan_poison_element(pool, element))
+ pool->elements[pool->curr_nr++] = element;
}
static void *remove_element(mempool_t *pool)
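
The bool return added above changes add_element()'s contract: kasan_mempool_poison_object() returns false when KASAN moves the object into its quarantine, and in that case the pool must forget the element rather than recycle it. A hedged caller-side sketch (stash_object_sketch() is a hypothetical name, assuming the KASAN mempool API introduced in this patch):

#include <linux/kasan.h>

/*
 * Hypothetical sketch: keep a freed object for later reuse only if
 * KASAN did not take ownership of it (i.e. put it into quarantine).
 */
static bool stash_object_sketch(void **slot, void *object)
{
	if (!kasan_mempool_poison_object(object))
		return false;	/* quarantined; must not be reused */

	*slot = object;		/* safe to keep for later reuse */
	return true;
}
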
@@ -447,6 +457,43 @@ repeat_alloc:
EXPORT_SYMBOL(mempool_alloc);
/**
+ * mempool_alloc_preallocated - allocate an element from preallocated elements
+ * belonging to a specific memory pool
+ * @pool: pointer to the memory pool which was allocated via
+ * mempool_create().
+ *
+ * This function is similar to mempool_alloc(), but it only attempts to
+ * allocate an element from the preallocated elements. It does not sleep
+ * and returns immediately if no preallocated elements are available.
+ *
+ * Return: pointer to the allocated element or %NULL if no elements are
+ * available.
+ */
+void *mempool_alloc_preallocated(mempool_t *pool)
+{
+ void *element;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ if (likely(pool->curr_nr)) {
+ element = remove_element(pool);
+ spin_unlock_irqrestore(&pool->lock, flags);
+ /* paired with the rmb in mempool_free(); see the comment there */
+ smp_wmb();
+ /*
+ * Update the allocation stack trace as this is more useful
+ * for debugging.
+ */
+ kmemleak_update_trace(element);
+ return element;
+ }
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ return NULL;
+}
+EXPORT_SYMBOL(mempool_alloc_preallocated);
+
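
A usage sketch for the new helper (grab_element_sketch() is a hypothetical caller, not part of this patch): take from the preallocated reserve on a path that must not sleep, and fall back to mempool_alloc(), which may sleep, otherwise:

#include <linux/mempool.h>
#include <linux/gfp.h>

/* Hypothetical caller: non-sleeping fast path with a sleeping fallback. */
static void *grab_element_sketch(mempool_t *pool)
{
	void *element = mempool_alloc_preallocated(pool);

	if (!element)
		element = mempool_alloc(pool, GFP_KERNEL);
	return element;
}
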
+/**
* mempool_free - return an element to the pool.
* @element: pool element pointer.
* @pool: pointer to the memory pool which was allocated via