author		Kees Cook <keescook@chromium.org>	2019-07-11 20:53:26 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-07-12 11:05:41 -0700
commit		a64b53780ec35b77daf817210c88aa42d172c98f (patch)
tree		68b21fc96d39f7a4570e0816b44ff83428a69173 /mm/slab.c
parent		598a0717a816abc8f5d3c4598628338b9190d127 (diff)
mm/slab: sanity-check page type when looking up cache
This avoids any possible type confusion when looking up an object.  For
example, if a non-slab were to be passed to kfree(), the invalid
slab_cache pointer (i.e. overlapped with some other value from the
struct page union) would be used for subsequent slab manipulations that
could lead to further memory corruption.

Since the page is already in cache, adding the PageSlab() check will
have nearly zero cost, so add a check and WARN() to virt_to_cache().
Additionally replaces an open-coded virt_to_cache().  To support the
failure mode, this also updates all callers of virt_to_cache() and
cache_from_obj() to handle a NULL cache pointer return value (though
note that several already handle this case gracefully).

[dan.carpenter@oracle.com: restore IRQs in kfree()]
Link: http://lkml.kernel.org/r/20190613065637.GE16334@mwanda
Link: http://lkml.kernel.org/r/20190530045017.15252-3-keescook@chromium.org
Signed-off-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Alexander Popov <alex.popov@linux.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
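For context: the sanity check itself lands in the shared virt_to_cache()
helper in mm/slab.h, which is outside this diffstat (limited to mm/slab.c).
A rough sketch of that helper, reconstructed from the description above;
the exact WARN_ONCE() message text is an assumption:

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page;

	page = virt_to_head_page(obj);
	/* Reject non-slab pages before trusting page->slab_cache. */
	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
		      __func__))
		return NULL;	/* Callers must now handle NULL. */
	return page->slab_cache;
}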
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c | 16 +++++++++-------
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index f7117ad9b3a3..db01e9aae31b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -371,12 +371,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 static int slab_max_order = SLAB_MAX_ORDER_LO;
 static bool slab_max_order_set __initdata;
 
-static inline struct kmem_cache *virt_to_cache(const void *obj)
-{
-	struct page *page = virt_to_head_page(obj);
-	return page->slab_cache;
-}
-
 static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
 				 unsigned int idx)
 {
@@ -3715,6 +3709,8 @@ void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
 			s = virt_to_cache(objp);
 		else
 			s = cache_from_obj(orig_s, objp);
+		if (!s)
+			continue;
 
 		debug_check_no_locks_freed(objp, s->object_size);
 		if (!(s->flags & SLAB_DEBUG_OBJECTS))
@@ -3749,6 +3745,10 @@ void kfree(const void *objp)
 	local_irq_save(flags);
 	kfree_debugcheck(objp);
 	c = virt_to_cache(objp);
+	if (!c) {
+		local_irq_restore(flags);
+		return;
+	}
 	debug_check_no_locks_freed(objp, c->object_size);
 
 	debug_check_no_obj_freed(objp, c->object_size);
@@ -4219,13 +4219,15 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
  */
 size_t ksize(const void *objp)
 {
+	struct kmem_cache *c;
 	size_t size;
 
 	BUG_ON(!objp);
 	if (unlikely(objp == ZERO_SIZE_PTR))
 		return 0;
 
-	size = virt_to_cache(objp)->object_size;
+	c = virt_to_cache(objp);
+	size = c ? c->object_size : 0;
 	/* We assume that ksize callers could use the whole allocated area,
 	 * so we need to unpoison this area.
 	 */
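For reference, a sketch of kfree() as it reads after this patch, showing how
the NULL return from virt_to_cache() is unwound with IRQs restored (the Dan
Carpenter fix noted above). The lines outside the hunk -- trace_kfree(), the
ZERO_OR_NULL_PTR() check, and __cache_free() -- are reconstructed from the
surrounding kernel source and are assumptions here, not part of this diff:

void kfree(const void *objp)
{
	struct kmem_cache *c;
	unsigned long flags;

	trace_kfree(_RET_IP_, objp);

	if (unlikely(ZERO_OR_NULL_PTR(objp)))
		return;
	local_irq_save(flags);
	kfree_debugcheck(objp);
	c = virt_to_cache(objp);
	if (!c) {
		/* Not a slab page: virt_to_cache() already WARNed. */
		local_irq_restore(flags);
		return;
	}
	debug_check_no_locks_freed(objp, c->object_size);

	debug_check_no_obj_freed(objp, c->object_size);
	__cache_free(c, (void *)objp, _RET_IP_);
	local_irq_restore(flags);
}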