path: root/io_uring/alloc_cache.h
author	Jens Axboe <axboe@kernel.dk>	2024-03-20 15:19:44 -0600
committer	Jens Axboe <axboe@kernel.dk>	2024-04-15 08:10:25 -0600
commit	414d0f45c316221acbf066658afdbae5b354a5cc (patch)
tree	9c872a002b5fa5119cd91d706045ebb915c338d9	/io_uring/alloc_cache.h
parent	e10677a8f6980dbae2e866b8320d90bae07e87ee (diff)
io_uring/alloc_cache: switch to array based caching
Currently lists are being used to manage this, but best practice is usually to have these in an array instead as that is cheaper to manage.

Outside of that detail, games are also played with KASAN as the list is inside the cached entry itself.

Finally, all users of this need a struct io_cache_entry embedded in their struct, which is union'ized with something else in there that isn't used across the free -> realloc cycle.

Get rid of all of that, and simply have it be an array. This will not change the memory used, as we're just trading an 8-byte member entry for the per-elem array size.

This reduces the overhead of the recycled allocations, and it reduces the amount of code needed to support recycling to about half of what it currently is.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
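For illustration only (this sketch is not from the patch; ctx->my_cache, struct my_obj and the kmalloc() fallback are made-up names), a caller of the reworked API recycles plain pointers instead of embedding a struct io_cache_entry:

	struct my_obj *obj;

	/* pop the most recently cached entry, or fall back to a fresh allocation */
	obj = io_alloc_cache_get(&ctx->my_cache);
	if (!obj)
		obj = kmalloc(sizeof(*obj), GFP_KERNEL);

	/* ... use obj ... */

	/* on release: stash it back if there is room, otherwise free it */
	if (!io_alloc_cache_put(&ctx->my_cache, obj))
		kfree(obj);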
Diffstat (limited to 'io_uring/alloc_cache.h')
-rw-r--r--	io_uring/alloc_cache.h	57
1 file changed, 26 insertions, 31 deletions
diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h
index 138ad14b0b12..b7a38a2069cf 100644
--- a/io_uring/alloc_cache.h
+++ b/io_uring/alloc_cache.h
@@ -6,61 +6,56 @@
  */
 #define IO_ALLOC_CACHE_MAX	128
 
-struct io_cache_entry {
-	struct io_wq_work_node node;
-};
-
 static inline bool io_alloc_cache_put(struct io_alloc_cache *cache,
-				      struct io_cache_entry *entry)
+				      void *entry)
 {
 	if (cache->nr_cached < cache->max_cached) {
-		cache->nr_cached++;
-		wq_stack_add_head(&entry->node, &cache->list);
-		kasan_mempool_poison_object(entry);
+		if (!kasan_mempool_poison_object(entry))
+			return false;
+		cache->entries[cache->nr_cached++] = entry;
 		return true;
 	}
 	return false;
 }
 
-static inline bool io_alloc_cache_empty(struct io_alloc_cache *cache)
-{
-	return !cache->list.next;
-}
-
-static inline struct io_cache_entry *io_alloc_cache_get(struct io_alloc_cache *cache)
+static inline void *io_alloc_cache_get(struct io_alloc_cache *cache)
 {
-	if (cache->list.next) {
-		struct io_cache_entry *entry;
+	if (cache->nr_cached) {
+		void *entry = cache->entries[--cache->nr_cached];
 
-		entry = container_of(cache->list.next, struct io_cache_entry, node);
 		kasan_mempool_unpoison_object(entry, cache->elem_size);
-		cache->list.next = cache->list.next->next;
-		cache->nr_cached--;
 		return entry;
 	}
 	return NULL;
 }
 
-static inline void io_alloc_cache_init(struct io_alloc_cache *cache,
+/* returns false if the cache was initialized properly */
+static inline bool io_alloc_cache_init(struct io_alloc_cache *cache,
 				       unsigned max_nr, size_t size)
 {
-	cache->list.next = NULL;
-	cache->nr_cached = 0;
-	cache->max_cached = max_nr;
-	cache->elem_size = size;
+	cache->entries = kvmalloc_array(max_nr, sizeof(void *), GFP_KERNEL);
+	if (cache->entries) {
+		cache->nr_cached = 0;
+		cache->max_cached = max_nr;
+		cache->elem_size = size;
+		return false;
+	}
+	return true;
 }
 
 static inline void io_alloc_cache_free(struct io_alloc_cache *cache,
-				       void (*free)(struct io_cache_entry *))
+				       void (*free)(const void *))
 {
-	while (1) {
-		struct io_cache_entry *entry = io_alloc_cache_get(cache);
+	void *entry;
+
+	if (!cache->entries)
+		return;
 
-		if (!entry)
-			break;
+	while ((entry = io_alloc_cache_get(cache)) != NULL)
 		free(entry);
-	}
-	cache->nr_cached = 0;
+
+	kvfree(cache->entries);
+	cache->entries = NULL;
 }
 
 #endif
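The header above relies on the cache struct itself carrying the backing array; that definition lives outside this file and is not part of this diff. A rough sketch of the fields used here, inferred only from the code above:

	struct io_alloc_cache {
		void		**entries;	/* kvmalloc_array()'d array of cached pointers */
		unsigned int	nr_cached;	/* how many entries are currently cached */
		unsigned int	max_cached;	/* cap passed to io_alloc_cache_init() */
		size_t		elem_size;	/* element size, used for KASAN unpoisoning */
	};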