| author | Linus Torvalds <torvalds@linux-foundation.org> | 2023-06-29 16:34:12 -0700 |
| --- | --- | --- |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-06-29 16:34:12 -0700 |
| commit | 632f54b4d60bfe0701f43d0bc387928de6e3dcfb (patch) | |
| tree | fac09ccb563bdd3e71133c3f571db6d747fe94fa /mm/slab.c | |
| parent | bf1fa6f15553df04f2bdd06190ccd5f388ab0777 (diff) | |
| parent | 7bc162d5cc4de5c33c5570dba2719a01506a9fd0 (diff) | |
| download | linux-632f54b4d60bfe0701f43d0bc387928de6e3dcfb.tar.gz linux-632f54b4d60bfe0701f43d0bc387928de6e3dcfb.tar.bz2 linux-632f54b4d60bfe0701f43d0bc387928de6e3dcfb.zip | |
Merge tag 'slab-for-6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab
Pull slab updates from Vlastimil Babka:
- SLAB deprecation:
  Following the discussion at LSF/MM 2023 [1], which raised no
  objections, the SLAB allocator is deprecated by renaming its config
  option to CONFIG_SLAB_DEPRECATED (so that its users notice) and
  updating the help text. SLUB should be used instead. Existing
  defconfigs with CONFIG_SLAB are also updated.
- SLAB_NO_MERGE kmem_cache flag (Jesper Dangaard Brouer):
  There are (very limited) cases where kmem_cache merging is
  undesirable, and the existing ways to prevent it are hacky.
  Introduce a new flag to do that cleanly and convert the existing
  hacky users. Btrfs plans to use it for debug kernel builds (that
  use case is always fine); networking wants it for performance
  reasons (that should be very rare). A usage sketch follows this
  list.
- Replace the usage of weak PRNGs (David Keisar Schmidt):
  In addition to using stronger RNGs for the security-related
  features, the code becomes a bit cleaner (a condensed sketch of the
  new pattern follows the diff at the bottom).
- Misc code cleanups (SeongJae Park, Xiongwei Song, Zhen Lei, and
  zhaoxinchao)
Link: https://lwn.net/Articles/932201/ [1]
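
As an illustration of the new flag, here is a minimal sketch of creating an unmergeable cache. The struct, cache name, and init function are made up for this example; kmem_cache_create() and SLAB_NO_MERGE are the real API:

```c
#include <linux/module.h>
#include <linux/slab.h>

/* Hypothetical object type, invented for illustration. */
struct my_debug_obj {
	unsigned long state;
	char payload[64];
};

static struct kmem_cache *my_cache;

static int __init my_cache_init(void)
{
	/*
	 * SLAB_NO_MERGE keeps this cache from being merged with other
	 * compatible caches, so it appears on its own in /proc/slabinfo
	 * and can be inspected and debugged in isolation.
	 */
	my_cache = kmem_cache_create("my_debug_obj",
				     sizeof(struct my_debug_obj),
				     0, SLAB_NO_MERGE, NULL);
	return my_cache ? 0 : -ENOMEM;
}
module_init(my_cache_init);
```

Previously, code wanting this effect had to abuse unrelated flags or the cache refcount; the first commit in the shortlog below converts the negative-refcount hack to this flag.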
* tag 'slab-for-6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
mm/slab_common: use SLAB_NO_MERGE instead of negative refcount
mm/slab: break up RCU readers on SLAB_TYPESAFE_BY_RCU example code
mm/slab: add a missing semicolon on SLAB_TYPESAFE_BY_RCU example code
mm/slab_common: reduce an if statement in create_cache()
mm/slab: introduce kmem_cache flag SLAB_NO_MERGE
mm/slab: rename CONFIG_SLAB to CONFIG_SLAB_DEPRECATED
mm/slab: remove HAVE_HARDENED_USERCOPY_ALLOCATOR
mm/slab_common: Replace invocation of weak PRNG
mm/slab: Replace invocation of weak PRNG
slub: Don't read nr_slabs and total_objects directly
slub: Remove slabs_node() function
slub: Remove CONFIG_SMP defined check
slub: Put objects_show() into CONFIG_SLUB_DEBUG enabled block
slub: Correct the error code when slab_kset is NULL
mm/slab: correct return values in comment for __kmem_cache_create()
Diffstat (limited to 'mm/slab.c')
-rw-r--r-- | mm/slab.c | 37 |
1 file changed, 11 insertions, 26 deletions
```diff
diff --git a/mm/slab.c b/mm/slab.c
index b7817dcba63e..88194391d553 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1883,14 +1883,12 @@ static bool set_on_slab_cache(struct kmem_cache *cachep,
 	return true;
 }
 
-/**
+/*
  * __kmem_cache_create - Create a cache.
  * @cachep: cache management descriptor
  * @flags: SLAB flags
  *
- * Returns a ptr to the cache on success, NULL on failure.
- * Cannot be called within an int, but can be interrupted.
- * The @ctor is run when new pages are allocated by the cache.
+ * Returns zero on success, nonzero on failure.
 *
 * The flags are
 *
@@ -1903,8 +1901,6 @@ static bool set_on_slab_cache(struct kmem_cache *cachep,
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
- *
- * Return: a pointer to the created cache or %NULL in case of error
 */
 int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
 {
@@ -2355,44 +2351,34 @@ static void cache_init_objs_debug(struct kmem_cache *cachep, struct slab *slab)
 
 #ifdef CONFIG_SLAB_FREELIST_RANDOM
 /* Hold information during a freelist initialization */
-union freelist_init_state {
-	struct {
-		unsigned int pos;
-		unsigned int *list;
-		unsigned int count;
-	};
-	struct rnd_state rnd_state;
+struct freelist_init_state {
+	unsigned int pos;
+	unsigned int *list;
+	unsigned int count;
 };
 
 /*
  * Initialize the state based on the randomization method available.
  * return true if the pre-computed list is available, false otherwise.
  */
-static bool freelist_state_initialize(union freelist_init_state *state,
+static bool freelist_state_initialize(struct freelist_init_state *state,
 				struct kmem_cache *cachep,
 				unsigned int count)
 {
 	bool ret;
-	unsigned int rand;
-
-	/* Use best entropy available to define a random shift */
-	rand = get_random_u32();
-
-	/* Use a random state if the pre-computed list is not available */
 	if (!cachep->random_seq) {
-		prandom_seed_state(&state->rnd_state, rand);
 		ret = false;
 	} else {
 		state->list = cachep->random_seq;
 		state->count = count;
-		state->pos = rand % count;
+		state->pos = get_random_u32_below(count);
 		ret = true;
 	}
 	return ret;
 }
 
 /* Get the next entry on the list and randomize it using a random shift */
-static freelist_idx_t next_random_slot(union freelist_init_state *state)
+static freelist_idx_t next_random_slot(struct freelist_init_state *state)
 {
 	if (state->pos >= state->count)
 		state->pos = 0;
@@ -2413,7 +2399,7 @@ static void swap_free_obj(struct slab *slab, unsigned int a, unsigned int b)
 static bool shuffle_freelist(struct kmem_cache *cachep, struct slab *slab)
 {
 	unsigned int objfreelist = 0, i, rand, count = cachep->num;
-	union freelist_init_state state;
+	struct freelist_init_state state;
 	bool precomputed;
 
 	if (count < 2)
@@ -2442,8 +2428,7 @@ static bool shuffle_freelist(struct kmem_cache *cachep, struct slab *slab)
 
 		/* Fisher-Yates shuffle */
 		for (i = count - 1; i > 0; i--) {
-			rand = prandom_u32_state(&state.rnd_state);
-			rand %= (i + 1);
+			rand = get_random_u32_below(i + 1);
 			swap_free_obj(slab, i, rand);
 		}
 	} else {
```
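
The randomization changes above follow one pattern: drop the locally seeded prandom state and the manual rand %= (i + 1) reduction in favor of get_random_u32_below(n), which returns a uniform value in [0, n) from the stronger core RNG. A condensed sketch of the resulting Fisher-Yates loop (shuffle_indices() is a hypothetical standalone helper, not the mm/slab.c code; get_random_u32_below() is the real API used in the diff):

```c
#include <linux/random.h>

/*
 * Shuffle an index array in place (Fisher-Yates).
 * get_random_u32_below(n) already returns an unbiased value in
 * [0, n), so no modulo reduction of a raw PRNG word is needed.
 */
static void shuffle_indices(unsigned int *idx, unsigned int count)
{
	unsigned int i, j, tmp;

	for (i = count - 1; i > 0; i--) {
		j = get_random_u32_below(i + 1);
		tmp = idx[i];
		idx[i] = idx[j];
		idx[j] = tmp;
	}
}
```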