path: root/mm/slab_common.c
author	Vlastimil Babka <vbabka@suse.cz>	2022-09-29 11:30:55 +0200
committer	Vlastimil Babka <vbabka@suse.cz>	2022-09-29 11:30:55 +0200
commit	445d41d7a7c15793933f47c0c23fae3a1d09a8c1 (patch)
tree	2621791892a2cc5b2df5e762af3843ae47453254 /mm/slab_common.c
parent	af961f8059a42d1b9941dd8aa83420b25fd17e91 (diff)
parent	05a940656e1eb2026d9ee31019d5b47e9545124d (diff)
Merge branch 'slab/for-6.1/kmalloc_size_roundup' into slab/for-next
The first two patches from a series by Kees Cook [1] introduce
kmalloc_size_roundup(). This will allow merging of per-subsystem patches
using the new function and ultimately stop (ab)using ksize() in a way
that causes ongoing trouble for debugging functionality and static
checkers.

[1] https://lore.kernel.org/all/20220923202822.2667581-1-keescook@chromium.org/

--
Resolved a conflict between the modified __ksize() comment in mm/slab.c
and a commit that unifies the __ksize() implementation into
mm/slab_common.c.
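The consumer pattern the series enables looks like the sketch below. It is
illustrative only, not part of this merge, and the alloc_rounded() helper is
hypothetical: callers round the request up front with kmalloc_size_roundup()
instead of allocating and then querying ksize(), so the usable size equals the
requested size and KASAN/UBSAN_BOUNDS/FORTIFY_SOURCE see a matching bound.

#include <linux/slab.h>

/*
 * Hypothetical helper: round the request up to the kmalloc bucket size
 * before allocating, so the caller may safely use every byte of the
 * returned buffer without tripping sanitizers.
 */
static void *alloc_rounded(size_t *sizep, gfp_t flags)
{
	size_t full_size = kmalloc_size_roundup(*sizep);
	void *p;

	if (!full_size)
		return NULL;
	p = kmalloc(full_size, flags);
	if (p)
		*sizep = full_size;	/* report the usable size back */
	return p;
}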
Diffstat (limited to 'mm/slab_common.c')
-rw-r--r--	mm/slab_common.c	37
1 file changed, 34 insertions(+), 3 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 82c10b4d1203..9ad97ae73a0a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -734,6 +734,26 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
return kmalloc_caches[kmalloc_type(flags)][index];
}
+size_t kmalloc_size_roundup(size_t size)
+{
+	struct kmem_cache *c;
+
+	/* Short-circuit the 0 size case. */
+	if (unlikely(size == 0))
+		return 0;
+	/* Short-circuit saturated "too-large" case. */
+	if (unlikely(size == SIZE_MAX))
+		return SIZE_MAX;
+	/* Above the smaller buckets, size is a multiple of page size. */
+	if (size > KMALLOC_MAX_CACHE_SIZE)
+		return PAGE_SIZE << get_order(size);
+
+	/* The flags don't matter since size_index is common to all. */
+	c = kmalloc_slab(size, GFP_KERNEL);
+	return c ? c->object_size : 0;
+}
+EXPORT_SYMBOL(kmalloc_size_roundup);
+
#ifdef CONFIG_ZONE_DMA
#define KMALLOC_DMA_NAME(sz) .name[KMALLOC_DMA] = "dma-kmalloc-" #sz,
#else
@@ -987,7 +1007,18 @@ void kfree(const void *object)
}
EXPORT_SYMBOL(kfree);
-/* Uninstrumented ksize. Only called by KASAN. */
+/**
+ * __ksize -- Report full size of underlying allocation
+ * @object: pointer to the object
+ *
+ * This should only be used internally to query the true size of allocations.
+ * It is not meant to be a way to discover the usable size of an allocation
+ * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
+ * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
+ * and/or FORTIFY_SOURCE.
+ *
+ * Return: size of the actual memory used by @object in bytes
+ */
size_t __ksize(const void *object)
{
struct folio *folio;
@@ -1294,8 +1325,8 @@ module_init(slab_proc_init);
#endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */
-static __always_inline void *__do_krealloc(const void *p, size_t new_size,
- gfp_t flags)
+static __always_inline __realloc_size(2) void *
+__do_krealloc(const void *p, size_t new_size, gfp_t flags)
{
void *ret;
size_t ks;
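The __realloc_size(2) annotation applied to __do_krealloc() above tells the
compiler that the returned pointer provides new_size (argument 2) usable
bytes; in the kernel it expands to the compiler's alloc_size attribute where
supported. A minimal userspace sketch of the same mechanism, assuming a GCC
or Clang toolchain (my_realloc() is a hypothetical stand-in):

#include <stdlib.h>
#include <string.h>

/* alloc_size(2): argument 2 is the usable size of the returned buffer. */
__attribute__((alloc_size(2)))
static void *my_realloc(void *p, size_t new_size)
{
	return realloc(p, new_size);
}

int main(void)
{
	char *buf = my_realloc(NULL, 16);

	if (!buf)
		return 1;
	/*
	 * With optimization, __builtin_object_size(buf, 1) can now resolve
	 * to 16, so fortified string/memory helpers are able to diagnose a
	 * compile-time-known overflow such as memset(buf, 0, 32).
	 */
	memset(buf, 0, 16);
	free(buf);
	return 0;
}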