path: root/mm/slub.c
author:    Levin, Alexander (Sasha Levin) <alexander.levin@verizon.com>  2017-11-15 17:35:51 -0800
committer: Linus Torvalds <torvalds@linux-foundation.org>  2017-11-15 18:21:04 -0800
commit:    4950276672fce5c241857540f8561c440663673d (patch)
tree:      014141bd67d83260a5d5de744e5d62a5cbf6c83b /mm/slub.c
parent:    cdb07bdea28ebf1286a979501620745680596365 (diff)
kmemcheck: remove annotations
Patch series "kmemcheck: kill kmemcheck", v2. As discussed at LSF/MM, kill kmemcheck. KASan is a replacement that is able to work without the limitation of kmemcheck (single CPU, slow). KASan is already upstream. We are also not aware of any users of kmemcheck (or users who don't consider KASan as a suitable replacement). The only objection was that since KASAN wasn't supported by all GCC versions provided by distros at that time we should hold off for 2 years, and try again. Now that 2 years have passed, and all distros provide gcc that supports KASAN, kill kmemcheck again for the very same reasons. This patch (of 4): Remove kmemcheck annotations, and calls to kmemcheck from the kernel. [alexander.levin@verizon.com: correctly remove kmemcheck call from dma_map_sg_attrs] Link: http://lkml.kernel.org/r/20171012192151.26531-1-alexander.levin@verizon.com Link: http://lkml.kernel.org/r/20171007030159.22241-2-alexander.levin@verizon.com Signed-off-by: Sasha Levin <alexander.levin@verizon.com> Cc: Alexander Potapenko <glider@google.com> Cc: Eric W. Biederman <ebiederm@xmission.com> Cc: Michal Hocko <mhocko@kernel.org> Cc: Pekka Enberg <penberg@kernel.org> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Tim Hansen <devtimhansen@gmail.com> Cc: Vegard Nossum <vegardno@ifi.uio.no> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
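For readers who have not seen the API being deleted: a typical kmemcheck annotation looked like the sketch below. kmemcheck_mark_initialized() is the real kmemcheck interface; the surrounding function and its name are hypothetical, for illustration only. KASAN needs no per-call-site annotations (it instruments loads and stores at compile time), which is why such calls can simply be removed rather than converted.

#include <linux/kmemcheck.h>

/*
 * Hypothetical driver completion handler: hardware has filled buf,
 * so tell kmemcheck the bytes are now valid and reads are fine.
 * Calls like this are what the series deletes tree-wide.
 */
static void example_rx_complete(void *buf, unsigned int len)
{
	kmemcheck_mark_initialized(buf, len);
}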
Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c | 20
1 file changed, 0 insertions(+), 20 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 51484f0fc068..ac3b50b9abec 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -22,7 +22,6 @@
#include <linux/notifier.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
-#include <linux/kmemcheck.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
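The include can go because the hunks below remove every remaining caller in slub.c. Note that kernels built without CONFIG_KMEMCHECK already compiled these calls down to nothing: <linux/kmemcheck.h> carried no-op stubs, roughly along the lines of the paraphrased sketch below, which is why deleting the annotations is mechanically safe for every configuration.

/* Paraphrased sketch of the old stub pattern in <linux/kmemcheck.h> */
#ifdef CONFIG_KMEMCHECK
void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size);
#else
static inline void kmemcheck_slab_free(struct kmem_cache *s, void *object,
				       size_t size)
{
}
#endif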
@@ -1377,7 +1376,6 @@ static inline void *slab_free_hook(struct kmem_cache *s, void *x)
unsigned long flags;
local_irq_save(flags);
- kmemcheck_slab_free(s, x, s->object_size);
debug_check_no_locks_freed(x, s->object_size);
local_irq_restore(flags);
}
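With the kmemcheck call gone, the IRQ-guarded debug section of slab_free_hook() shrinks to the lockdep check alone. Reconstructed from the context lines above (a sketch in slub.c's context; the wrapper name is made up, the body is what remains after this hunk):

static inline void debug_free_checks(struct kmem_cache *s, void *x)
{
	unsigned long flags;

	/* debug_check_no_locks_freed() expects interrupts disabled */
	local_irq_save(flags);
	debug_check_no_locks_freed(x, s->object_size);
	local_irq_restore(flags);
}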
@@ -1598,22 +1596,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
stat(s, ORDER_FALLBACK);
}
- if (kmemcheck_enabled &&
- !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
- int pages = 1 << oo_order(oo);
-
- kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
-
- /*
- * Objects from caches that have a constructor don't get
- * cleared when they're allocated, so we need to do it here.
- */
- if (s->ctor)
- kmemcheck_mark_uninitialized_pages(page, pages);
- else
- kmemcheck_mark_unallocated_pages(page, pages);
- }
-
page->objects = oo_objects(oo);
order = compound_order(page);
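The deleted block was the only place allocate_slab() consulted SLAB_NOTRACK: caches created with that flag opted out of kmemcheck's shadow tracking, and caches with a constructor had their pages marked merely uninitialized rather than unallocated, because the ctor (not a memset) initializes the objects. With kmemcheck gone the flag is dead weight, and later patches in this series remove it as well. An opt-out looked roughly like the sketch below (the cache name and struct are hypothetical; SLAB_NOTRACK is the real flag):

struct example { int a; };

static struct kmem_cache *example_cache_create(void)
{
	/* SLAB_NOTRACK: skip kmemcheck tracking for this cache */
	return kmem_cache_create("example_cache", sizeof(struct example),
				 0, SLAB_NOTRACK, NULL);
}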
@@ -1689,8 +1671,6 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
check_object(s, page, p, SLUB_RED_INACTIVE);
}
- kmemcheck_free_shadow(page, compound_order(page));
-
mod_lruvec_page_state(page,
(s->flags & SLAB_RECLAIM_ACCOUNT) ?
NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
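kmemcheck_free_shadow() released the shadow pages that the allocate_slab() hunk above set up, so the two removals have to land together; dropping only one side would either leak shadow pages or free shadow that was never allocated. Schematically (the calls are quoted verbatim from the two hunks, the surrounding flow is paraphrased):

/*
 * Pre-patch lifetime of kmemcheck's shadow pages:
 *
 *   allocate_slab():
 *       kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);
 *   ...
 *   __free_slab():
 *       kmemcheck_free_shadow(page, compound_order(page));
 */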