summaryrefslogtreecommitdiffstats
path: root/mm/kfence/kfence_test.c
diff options
context:
space:
mode:
authorVlastimil Babka <vbabka@suse.cz>2023-07-18 10:16:05 +0200
committerVlastimil Babka <vbabka@suse.cz>2023-08-29 11:23:04 +0200
commit3d053e8060430b86bad0854b7c7f03f15be3a7e5 (patch)
tree83e0b8aad30b8090b2497991b221b6d69407b2b2 /mm/kfence/kfence_test.c
parent1662b6c2bb7e7502d6ae4b6aca4116e844a4277c (diff)
parent3c6152940584290668b35fa0800026f6a1ae05fe (diff)
downloadlinux-stable-3d053e8060430b86bad0854b7c7f03f15be3a7e5.tar.gz
linux-stable-3d053e8060430b86bad0854b7c7f03f15be3a7e5.tar.bz2
linux-stable-3d053e8060430b86bad0854b7c7f03f15be3a7e5.zip
Merge branch 'slab/for-6.6/random_kmalloc' into slab/for-next
Merge the new hardening feature to make heap spraying harder, by GONG, Ruiqi. It creates multiple (16) copies of kmalloc caches, reducing the chance of an attacker-controllable allocation site to land in the same slab as e.g. an allocation site with use-after-free vulnerability. The selection of the copy is derived from the allocation site address, including a per-boot random seed. In line with SLAB deprecation, this is a SLUB only feature, incompatible with SLUB_TINY due to the memory overhead of the extra cache copies.
Diffstat (limited to 'mm/kfence/kfence_test.c')
-rw-r--r-- mm/kfence/kfence_test.c | 7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
index 9e008a336d9f..95b2b84c296d 100644
--- a/mm/kfence/kfence_test.c
+++ b/mm/kfence/kfence_test.c
@@ -212,7 +212,9 @@ static void test_cache_destroy(void)
static inline size_t kmalloc_cache_alignment(size_t size)
{
- return kmalloc_caches[kmalloc_type(GFP_KERNEL)][__kmalloc_index(size, false)]->align;
+ /* just to get ->align so no need to pass in the real caller */
+ enum kmalloc_cache_type type = kmalloc_type(GFP_KERNEL, 0);
+ return kmalloc_caches[type][__kmalloc_index(size, false)]->align;
}
/* Must always inline to match stack trace against caller. */
@@ -282,8 +284,9 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat
if (is_kfence_address(alloc)) {
struct slab *slab = virt_to_slab(alloc);
+ enum kmalloc_cache_type type = kmalloc_type(GFP_KERNEL, _RET_IP_);
struct kmem_cache *s = test_cache ?:
- kmalloc_caches[kmalloc_type(GFP_KERNEL)][__kmalloc_index(size, false)];
+ kmalloc_caches[type][__kmalloc_index(size, false)];
/*
* Verify that various helpers return the right values