author | Matthew Wilcox (Oracle) <willy@infradead.org> | 2022-04-29 14:37:59 -0700 |
---|---|---|
committer | akpm <akpm@linux-foundation.org> | 2022-04-29 14:37:59 -0700 |
commit | 5d8de293c224896a4da99763fce4f9794308caf4 (patch) | |
tree | 06554517aca8f686fdc890da04dd811c1254c995 /fs/proc/vmcore.c | |
parent | 04d168c6d42d1772d35372301a14bb20784c81c5 (diff) | |
vmcore: convert copy_oldmem_page() to take an iov_iter
Patch series "Convert vmcore to use an iov_iter", v5.
For some reason several people have been sending bad patches to fix
compiler warnings in vmcore recently. Here's how it should be done.
Compile-tested only on x86. As noted in the first patch, s390 should take
this conversion a bit further, but I'm not inclined to do that work
myself.
This patch (of 3):
Instead of passing in 'buf' and 'userbuf' arguments, pass in an iov_iter.
s390 needs more work to pass the iov_iter down further, or refactor, but
I'd be more comfortable if someone who can test on s390 did that work.
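To illustrate the new calling convention, this is roughly the shape an architecture's copy_oldmem_page() takes after the conversion (a sketch only, loosely modelled on the x86 version; the diffstat shown here is limited to fs/proc/vmcore.c, so the arch side is not visible below):

  ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
                           size_t csize, unsigned long offset)
  {
          void *vaddr;

          if (!csize)
                  return 0;

          /* Map one page of the old kernel's memory. */
          vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
          if (!vaddr)
                  return -ENOMEM;

          /* copy_to_iter() copies to user or kernel memory as the iter dictates. */
          csize = copy_to_iter(vaddr + offset, csize, iter);

          iounmap(vaddr);
          return csize;
  }

The 'userbuf' flag disappears because the iov_iter already knows whether its segments are user or kernel memory, and returning the number of bytes copied lets the caller turn a short copy into -EFAULT, as read_from_oldmem_iter() below does.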
It's more convenient to convert the whole of read_from_oldmem() to take an
iov_iter at the same time, so rename it to read_from_oldmem_iter() and add
a temporary read_from_oldmem() wrapper that creates an iov_iter.
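In essence the wrapper just packs the old (buf, userbuf) pair into an iov_iter once; for a kernel buffer that amounts to (mirroring the hunk in the diff below):

  struct iov_iter iter;
  struct kvec kvec = { .iov_base = buf, .iov_len = count };

  iov_iter_kvec(&iter, READ, &kvec, 1, count);

For a user buffer the same is done with a struct iovec and iov_iter_init() instead.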
Link: https://lkml.kernel.org/r/20220408090636.560886-1-bhe@redhat.com
Link: https://lkml.kernel.org/r/20220408090636.560886-2-bhe@redhat.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Baoquan He <bhe@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'fs/proc/vmcore.c')
-rw-r--r-- | fs/proc/vmcore.c | 54 |
1 file changed, 34 insertions, 20 deletions
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 6f1b8ddc6f7a..54dda2e19ed1 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -26,6 +26,7 @@
 #include <linux/vmalloc.h>
 #include <linux/pagemap.h>
 #include <linux/uaccess.h>
+#include <linux/uio.h>
 #include <linux/cc_platform.h>
 #include <asm/io.h>
 #include "internal.h"
@@ -128,9 +129,8 @@ static int open_vmcore(struct inode *inode, struct file *file)
 }

 /* Reads a page from the oldmem device from given offset. */
-ssize_t read_from_oldmem(char *buf, size_t count,
-                         u64 *ppos, int userbuf,
-                         bool encrypted)
+static ssize_t read_from_oldmem_iter(struct iov_iter *iter, size_t count,
+                                     u64 *ppos, bool encrypted)
 {
         unsigned long pfn, offset;
         size_t nr_bytes;
@@ -152,29 +152,23 @@ ssize_t read_from_oldmem(char *buf, size_t count,

                 /* If pfn is not ram, return zeros for sparse dump files */
                 if (!pfn_is_ram(pfn)) {
-                        tmp = 0;
-                        if (!userbuf)
-                                memset(buf, 0, nr_bytes);
-                        else if (clear_user(buf, nr_bytes))
-                                tmp = -EFAULT;
+                        tmp = iov_iter_zero(nr_bytes, iter);
                 } else {
                         if (encrypted)
-                                tmp = copy_oldmem_page_encrypted(pfn, buf,
+                                tmp = copy_oldmem_page_encrypted(iter, pfn,
                                                                  nr_bytes,
-                                                                 offset,
-                                                                 userbuf);
+                                                                 offset);
                         else
-                                tmp = copy_oldmem_page(pfn, buf, nr_bytes,
-                                                       offset, userbuf);
+                                tmp = copy_oldmem_page(iter, pfn, nr_bytes,
+                                                       offset);
                 }
-                if (tmp < 0) {
+                if (tmp < nr_bytes) {
                         srcu_read_unlock(&vmcore_cb_srcu, idx);
-                        return tmp;
+                        return -EFAULT;
                 }

                 *ppos += nr_bytes;
                 count -= nr_bytes;
-                buf += nr_bytes;
                 read += nr_bytes;
                 ++pfn;
                 offset = 0;
@@ -184,6 +178,27 @@ ssize_t read_from_oldmem(char *buf, size_t count,
         return read;
 }

+ssize_t read_from_oldmem(char *buf, size_t count,
+                         u64 *ppos, int userbuf,
+                         bool encrypted)
+{
+        struct iov_iter iter;
+        struct iovec iov;
+        struct kvec kvec;
+
+        if (userbuf) {
+                iov.iov_base = (__force void __user *)buf;
+                iov.iov_len = count;
+                iov_iter_init(&iter, READ, &iov, 1, count);
+        } else {
+                kvec.iov_base = buf;
+                kvec.iov_len = count;
+                iov_iter_kvec(&iter, READ, &kvec, 1, count);
+        }
+
+        return read_from_oldmem_iter(&iter, count, ppos, encrypted);
+}
+
 /*
  * Architectures may override this function to allocate ELF header in 2nd kernel
  */
@@ -228,11 +243,10 @@ int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
 /*
  * Architectures which support memory encryption override this.
  */
-ssize_t __weak
-copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
-                           unsigned long offset, int userbuf)
+ssize_t __weak copy_oldmem_page_encrypted(struct iov_iter *iter,
+                unsigned long pfn, size_t csize, unsigned long offset)
 {
-        return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
+        return copy_oldmem_page(iter, pfn, csize, offset);
 }

 /*