| author | Vlastimil Babka <vbabka@suse.cz> | 2024-07-15 10:44:16 +0200 |
|---|---|---|
| committer | Vlastimil Babka <vbabka@suse.cz> | 2024-07-15 10:44:16 +0200 |
| commit | 436381eaf2a423e60fc8340399f7d2458091b383 (patch) | |
| tree | 16ecce9d2dcb5ed43de60a966e9026c9ffd6e711 /mm/util.c | |
| parent | a52c6330ff2fe1163333fa6609bdc6e8763ec286 (diff) | |
| parent | d73778e4b86755d527a0c6b249cde846770b2f66 (diff) | |
Merge branch 'slab/for-6.11/buckets' into slab/for-next
Merge all the slab patches previously collected on top of v6.10-rc1,
over cleanups/fixes that had to be based on rc6.
Diffstat (limited to 'mm/util.c')
-rw-r--r-- | mm/util.c | 23
1 file changed, 17 insertions, 6 deletions
```diff
diff --git a/mm/util.c b/mm/util.c
index fe723241b66f..d87e73692cf5 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -198,6 +198,16 @@ char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
 }
 EXPORT_SYMBOL(kmemdup_nul);
 
+static kmem_buckets *user_buckets __ro_after_init;
+
+static int __init init_user_buckets(void)
+{
+	user_buckets = kmem_buckets_create("memdup_user", 0, 0, INT_MAX, NULL);
+
+	return 0;
+}
+subsys_initcall(init_user_buckets);
+
 /**
  * memdup_user - duplicate memory region from user space
  *
@@ -211,7 +221,7 @@ void *memdup_user(const void __user *src, size_t len)
 {
 	void *p;
 
-	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
+	p = kmem_buckets_alloc_track_caller(user_buckets, len, GFP_USER | __GFP_NOWARN);
 	if (!p)
 		return ERR_PTR(-ENOMEM);
 
@@ -237,7 +247,7 @@ void *vmemdup_user(const void __user *src, size_t len)
 {
 	void *p;
 
-	p = kvmalloc(len, GFP_USER);
+	p = kmem_buckets_valloc(user_buckets, len, GFP_USER);
 	if (!p)
 		return ERR_PTR(-ENOMEM);
 
@@ -594,9 +604,10 @@ unsigned long vm_mmap(struct file *file, unsigned long addr,
 EXPORT_SYMBOL(vm_mmap);
 
 /**
- * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
+ * __kvmalloc_node - attempt to allocate physically contiguous memory, but upon
  * failure, fall back to non-contiguous (vmalloc) allocation.
  * @size: size of the request.
+ * @b: which set of kmalloc buckets to allocate from.
  * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
  * @node: numa node to allocate from
  *
@@ -609,7 +620,7 @@ EXPORT_SYMBOL(vm_mmap);
  *
  * Return: pointer to the allocated memory of %NULL in case of failure
  */
-void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node)
+void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
 {
 	gfp_t kmalloc_flags = flags;
 	void *ret;
@@ -631,7 +642,7 @@ void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node)
 		kmalloc_flags &= ~__GFP_NOFAIL;
 	}
 
-	ret = kmalloc_node_noprof(size, kmalloc_flags, node);
+	ret = __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, b), kmalloc_flags, node);
 
 	/*
 	 * It doesn't really make sense to fallback to vmalloc for sub page
@@ -660,7 +671,7 @@ void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node)
 			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
 			node, __builtin_return_address(0));
 }
-EXPORT_SYMBOL(kvmalloc_node_noprof);
+EXPORT_SYMBOL(__kvmalloc_node_noprof);
 
 /**
  * kvfree() - Free memory.
```
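The renamed __kvmalloc_node_noprof() is threaded through two helper macros from <linux/slab.h>: with CONFIG_SLAB_BUCKETS enabled, DECL_BUCKET_PARAMS(size, b) declares both the size and a kmem_buckets pointer and PASS_BUCKET_PARAMS(size, b) forwards both, while with the option disabled they collapse to the bare size argument, so the bucket plumbing compiles away. As a rough sketch of how another subsystem could follow the memdup_user pattern above (the foo_* names are hypothetical; the kmem_buckets_create()/kmem_buckets_alloc() calls follow the API this branch merges):

```c
/*
 * Sketch only: "foo" is a hypothetical subsystem. The calls mirror
 * the kmem_buckets API used by the memdup_user conversion above.
 */
#include <linux/init.h>
#include <linux/slab.h>

static kmem_buckets *foo_buckets __ro_after_init;

static int __init foo_buckets_init(void)
{
	/*
	 * Create a dedicated set of kmalloc-style caches so that foo's
	 * (potentially user-influenced) allocations do not share slab
	 * pages with the rest of the kernel's kmalloc traffic.
	 */
	foo_buckets = kmem_buckets_create("foo", 0, 0, INT_MAX, NULL);
	return 0;
}
subsys_initcall(foo_buckets_init);

void *foo_alloc(size_t len)
{
	/*
	 * If bucket creation failed at init time, foo_buckets is NULL
	 * and the slab core falls back to the regular kmalloc caches.
	 */
	return kmem_buckets_alloc(foo_buckets, len, GFP_KERNEL);
}
```

That NULL fallback is why the conversion above needs no error handling around kmem_buckets_create(): an allocation site keeps working either way, it just loses the cache-isolation hardening when the dedicated buckets are unavailable.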