author      Jens Axboe <axboe@kernel.dk>    2024-03-13 14:10:40 -0600
committer   Jens Axboe <axboe@kernel.dk>    2024-04-15 08:10:26 -0600
commit      09fc75e0c035a2cabb8caa15cec6e85159dd94f0 (patch)
tree        f02232f1d8429b1cbff1ee4481486dd446ad4544 /io_uring/io_uring.c
parent      3ab1db3c6039e02a9deb9d5091d28d559917a645 (diff)
download    linux-09fc75e0c035a2cabb8caa15cec6e85159dd94f0.tar.gz
            linux-09fc75e0c035a2cabb8caa15cec6e85159dd94f0.tar.bz2
            linux-09fc75e0c035a2cabb8caa15cec6e85159dd94f0.zip
io_uring: use vmap() for ring mapping
This is the last holdout which does the odd page checking; convert it to
vmap(), just like what is done for the non-mmap path.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
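For context, here is a minimal sketch of the pattern this patch switches to: pin the user pages backing the ring, then vmap() them into one contiguous kernel virtual range, which is what makes the old per-page page_address()/PageHighMem() checks unnecessary. The helper name map_user_ring() and its signature are made up for illustration; only the pin + vmap() shape mirrors the diff below.

/*
 * Illustrative sketch only -- map_user_ring() is a hypothetical helper,
 * not the function touched by this patch.
 */
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *map_user_ring(unsigned long uaddr, size_t size,
                           struct page ***pages_out, unsigned short *npages_out)
{
        unsigned int nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        struct page **pages;
        void *vaddr;
        int pinned, ret;

        pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                return ERR_PTR(-ENOMEM);

        /* Pin the user pages so they cannot be migrated or reclaimed. */
        pinned = pin_user_pages_fast(uaddr, nr_pages,
                                     FOLL_WRITE | FOLL_LONGTERM, pages);
        if (pinned != nr_pages) {
                ret = pinned < 0 ? pinned : -EFAULT;
                goto err;
        }

        /*
         * vmap() provides a single contiguous kernel mapping regardless of
         * how the pages are laid out physically (or whether they live in
         * highmem), so no per-page contiguity checks are needed.
         */
        vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
        if (vaddr) {
                *pages_out = pages;
                *npages_out = nr_pages;
                return vaddr;
        }
        ret = -ENOMEM;
err:
        if (pinned > 0)
                unpin_user_pages(pages, pinned);
        kvfree(pages);
        return ERR_PTR(ret);
}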
Diffstat (limited to 'io_uring/io_uring.c')
-rw-r--r--   io_uring/io_uring.c   38
1 file changed, 9 insertions(+), 29 deletions(-)
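The second hunk of the diff below adds the matching teardown: vunmap() removes the kernel virtual mapping, while io_pages_free() unpins and frees the pages. A minimal sketch of the same steps, again with a hypothetical helper name (unmap_user_ring()); names and ordering here are illustrative, not the patch's exact code:

/* Hypothetical counterpart to the map_user_ring() sketch above. */
static void unmap_user_ring(void *vaddr, struct page **pages,
                            unsigned short npages)
{
        /* Remove the kernel virtual mapping created by vmap(). */
        vunmap(vaddr);
        /* Drop the pins taken by pin_user_pages_fast(). */
        unpin_user_pages(pages, npages);
        /* Free the struct page pointer array. */
        kvfree(pages);
}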
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index fba68c37a77d..ef8f1c6ee253 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -63,7 +63,6 @@
 #include <linux/sched/mm.h>
 #include <linux/uaccess.h>
 #include <linux/nospec.h>
-#include <linux/highmem.h>
 #include <linux/fsnotify.h>
 #include <linux/fadvise.h>
 #include <linux/task_work.h>
@@ -2657,7 +2656,7 @@ static void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
         struct page **page_array;
         unsigned int nr_pages;
         void *page_addr;
-        int ret, i, pinned;
+        int ret, pinned;
 
         *npages = 0;
@@ -2679,34 +2678,13 @@ static void *__io_uaddr_map(struct page ***pages, unsigned short *npages,
                 goto free_pages;
         }
 
-        page_addr = page_address(page_array[0]);
-        for (i = 0; i < nr_pages; i++) {
-                ret = -EINVAL;
-
-                /*
-                 * Can't support mapping user allocated ring memory on 32-bit
-                 * archs where it could potentially reside in highmem. Just
-                 * fail those with -EINVAL, just like we did on kernels that
-                 * didn't support this feature.
-                 */
-                if (PageHighMem(page_array[i]))
-                        goto free_pages;
-
-                /*
-                 * No support for discontig pages for now, should either be a
-                 * single normal page, or a huge page. Later on we can add
-                 * support for remapping discontig pages, for now we will
-                 * just fail them with EINVAL.
-                 */
-                if (page_address(page_array[i]) != page_addr)
-                        goto free_pages;
-                page_addr += PAGE_SIZE;
+        page_addr = vmap(page_array, nr_pages, VM_MAP, PAGE_KERNEL);
+        if (page_addr) {
+                *pages = page_array;
+                *npages = nr_pages;
+                return page_addr;
         }
-
-        *pages = page_array;
-        *npages = nr_pages;
-        return page_to_virt(page_array[0]);
-
+        ret = -ENOMEM;
 free_pages:
         io_pages_free(&page_array, pinned > 0 ? pinned : 0);
         return ERR_PTR(ret);
@@ -2736,6 +2714,8 @@ static void io_rings_free(struct io_ring_ctx *ctx)
                 ctx->n_ring_pages = 0;
                 io_pages_free(&ctx->sqe_pages, ctx->n_sqe_pages);
                 ctx->n_sqe_pages = 0;
+                vunmap(ctx->rings);
+                vunmap(ctx->sq_sqes);
         }
 
         ctx->rings = NULL;