| Field | Value | Date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2020-06-01 16:03:37 -0700 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-06-01 16:03:37 -0700 |
| commit | 4b01285e1672ed9342ace952e92eb1e1db7134ae (patch) | |
| tree | 0c7389b64a3af8babefaafd3d261138ed43b719f /arch | |
| parent | b23c4771ff62de8ca9b5e4a2d64491b2fb6f8f69 (diff) | |
| parent | 001c1a655f0a4e4ebe5d9beb47466dc5c6ab4871 (diff) | |
Merge branch 'uaccess.csum' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull uaccess/csum updates from Al Viro:
"Regularize the sitation with uaccess checksum primitives:
- fold csum_partial_... into csum_and_copy_..._user()
- on x86 collapse several access_ok()/stac()/clac() into
user_access_begin()/user_access_end()"
* 'uaccess.csum' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
default csum_and_copy_to_user(): don't bother with access_ok()
take the dummy csum_and_copy_from_user() into net/checksum.h
arm: switch to csum_and_copy_from_user()
sh32: convert to csum_and_copy_from_user()
m68k: convert to csum_and_copy_from_user()
xtensa: switch to providing csum_and_copy_from_user()
sparc: switch to providing csum_and_copy_from_user()
parisc: turn csum_partial_copy_from_user() into csum_and_copy_from_user()
alpha: turn csum_partial_copy_from_user() into csum_and_copy_from_user()
ia64: turn csum_partial_copy_from_user() into csum_and_copy_from_user()
ia64: csum_partial_copy_nocheck(): don't abuse csum_partial_copy_from_user()
x86: switch 32bit csum_and_copy_to_user() to user_access_{begin,end}()
x86: switch both 32bit and 64bit to providing csum_and_copy_from_user()
x86_64: csum_..._copy_..._user(): switch to unsafe_..._user()
get rid of csum_partial_copy_to_user()
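For readers coming to this cold, the x86 side of the series replaces open-coded access_ok()/stac()/clac() sequences with the user_access_begin()/user_access_end() pair, which does the range check and opens the SMAP window in one call. Below is a minimal before/after sketch of that shape, loosely modeled on the 32-bit csum_and_copy_to_user() hunk further down; the wrapper names and the simplified error handling are illustrative, not the kernel's actual code.

```c
/* Before: range check, then explicit SMAP open/close around the copy. */
static __wsum copy_and_csum_old(const void *src, void __user *dst, int len,
				__wsum sum, int *err_ptr)
{
	__wsum ret;

	might_sleep();
	if (access_ok(dst, len)) {		/* only checks the range */
		stac();				/* open user access (SMAP) */
		ret = csum_partial_copy_generic(src, (__force void *)dst,
						len, sum, NULL, err_ptr);
		clac();				/* close user access */
		return ret;
	}
	if (len)
		*err_ptr = -EFAULT;
	return sum;
}

/* After: one primitive both validates the range and opens the window. */
static __wsum copy_and_csum_new(const void *src, void __user *dst, int len,
				__wsum sum, int *err_ptr)
{
	__wsum ret;

	might_sleep();
	if (user_access_begin(dst, len)) {	/* access_ok() + STAC */
		ret = csum_partial_copy_generic(src, (__force void *)dst,
						len, sum, NULL, err_ptr);
		user_access_end();		/* CLAC */
		return ret;
	}
	if (len)
		*err_ptr = -EFAULT;
	return sum;
}
```

The same collapse is what lets the 64-bit wrappers use unsafe_get_user()/unsafe_put_user() inside the user_access_begin() window instead of the individually checked __get_user()/__put_user() calls.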
Diffstat (limited to 'arch')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | arch/alpha/include/asm/checksum.h | 3 |
| -rw-r--r-- | arch/alpha/lib/csum_partial_copy.c | 6 |
| -rw-r--r-- | arch/arm/include/asm/checksum.h | 14 |
| -rw-r--r-- | arch/c6x/lib/checksum.c | 22 |
| -rw-r--r-- | arch/ia64/include/asm/checksum.h | 10 |
| -rw-r--r-- | arch/ia64/lib/csum_partial_copy.c | 32 |
| -rw-r--r-- | arch/m68k/include/asm/checksum.h | 3 |
| -rw-r--r-- | arch/m68k/lib/checksum.c | 4 |
| -rw-r--r-- | arch/nios2/include/asm/checksum.h | 2 |
| -rw-r--r-- | arch/parisc/include/asm/checksum.h | 7 |
| -rw-r--r-- | arch/parisc/lib/checksum.c | 20 |
| -rw-r--r-- | arch/s390/include/asm/checksum.h | 19 |
| -rw-r--r-- | arch/sh/include/asm/checksum_32.h | 9 |
| -rw-r--r-- | arch/sparc/include/asm/checksum.h | 1 |
| -rw-r--r-- | arch/sparc/include/asm/checksum_32.h | 15 |
| -rw-r--r-- | arch/sparc/include/asm/checksum_64.h | 2 |
| -rw-r--r-- | arch/x86/include/asm/checksum.h | 2 |
| -rw-r--r-- | arch/x86/include/asm/checksum_32.h | 21 |
| -rw-r--r-- | arch/x86/include/asm/checksum_64.h | 12 |
| -rw-r--r-- | arch/x86/lib/csum-wrappers_64.c | 35 |
| -rw-r--r-- | arch/x86/um/asm/checksum.h | 20 |
| -rw-r--r-- | arch/xtensa/include/asm/checksum.h | 11 |

22 files changed, 84 insertions(+), 186 deletions(-)
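Most of the per-architecture conversions in the diff below follow one recurring shape: keep the existing low-level copy-and-checksum primitive, and wrap it in a csum_and_copy_from_user() that performs the access_ok() check and reports a fault through *err_ptr. A rough sketch of that pattern follows; do_arch_csum_copy() is a hypothetical stand-in for whatever helper each architecture already has (csum_partial_copy_generic() on sh and xtensa, for example).

```c
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst,
			       int len, __wsum sum, int *err_ptr)
{
	/* Range check up front; the low-level helper no longer does it. */
	if (access_ok(src, len))
		return do_arch_csum_copy(src, dst, len, sum, err_ptr);

	/* Bad user range: report the fault and return the unchanged sum. */
	if (len)
		*err_ptr = -EFAULT;
	return sum;
}
```

Defining _HAVE_ARCH_COPY_AND_CSUM_FROM_USER is what tells the generic checksum header not to supply its own fallback for that architecture.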
diff --git a/arch/alpha/include/asm/checksum.h b/arch/alpha/include/asm/checksum.h
index 473e6ccb65a3..0eac81624d01 100644
--- a/arch/alpha/include/asm/checksum.h
+++ b/arch/alpha/include/asm/checksum.h
@@ -41,7 +41,8 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum);
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *errp);
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *errp);
 
 __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
 
diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
index e53f96e8aa6d..af1dad74e933 100644
--- a/arch/alpha/lib/csum_partial_copy.c
+++ b/arch/alpha/lib/csum_partial_copy.c
@@ -325,7 +325,7 @@ csum_partial_cfu_unaligned(const unsigned long __user * src,
 }
 
 __wsum
-csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+csum_and_copy_from_user(const void __user *src, void *dst, int len,
 			       __wsum sum, int *errp)
 {
 	unsigned long checksum = (__force u32) sum;
@@ -369,7 +369,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst, int len,
 	}
 	return (__force __wsum)checksum;
 }
-EXPORT_SYMBOL(csum_partial_copy_from_user);
+EXPORT_SYMBOL(csum_and_copy_from_user);
 
 __wsum
 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
@@ -377,7 +377,7 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
 	__wsum checksum;
 	mm_segment_t oldfs = get_fs();
 	set_fs(KERNEL_DS);
-	checksum = csum_partial_copy_from_user((__force const void __user *)src,
+	checksum = csum_and_copy_from_user((__force const void __user *)src,
 						dst, len, sum, NULL);
 	set_fs(oldfs);
 	return checksum;
diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
index 20043e0ebb07..ed6073fee338 100644
--- a/arch/arm/include/asm/checksum.h
+++ b/arch/arm/include/asm/checksum.h
@@ -40,6 +40,20 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
 __wsum
 csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
 
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+static inline
+__wsum csum_and_copy_from_user(const void __user *src, void *dst,
+			       int len, __wsum sum, int *err_ptr)
+{
+	if (access_ok(src, len))
+		return csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
+
+	if (len)
+		*err_ptr = -EFAULT;
+
+	return sum;
+}
+
 /*
  * Fold a partial checksum without adding pseudo headers
  */
diff --git a/arch/c6x/lib/checksum.c b/arch/c6x/lib/checksum.c
index 46940844c553..335ca4900808 100644
--- a/arch/c6x/lib/checksum.c
+++ b/arch/c6x/lib/checksum.c
@@ -4,28 +4,6 @@
 
 #include <linux/module.h>
 #include <net/checksum.h>
-#include <asm/byteorder.h>
-
-/*
- * copy from fs while checksumming, otherwise like csum_partial
- */
-__wsum
-csum_partial_copy_from_user(const void __user *src, void *dst, int len,
-			    __wsum sum, int *csum_err)
-{
-	int missing;
-
-	missing = __copy_from_user(dst, src, len);
-	if (missing) {
-		memset(dst + len - missing, 0, missing);
-		*csum_err = -EFAULT;
-	} else
-		*csum_err = 0;
-
-	return csum_partial(dst, len, sum);
-}
-EXPORT_SYMBOL(csum_partial_copy_from_user);
-
 /* These are from csum_64plus.S */
 EXPORT_SYMBOL(csum_partial);
 EXPORT_SYMBOL(csum_partial_copy);
diff --git a/arch/ia64/include/asm/checksum.h b/arch/ia64/include/asm/checksum.h
index 0ed18bc3f6cf..2a1c64629cdc 100644
--- a/arch/ia64/include/asm/checksum.h
+++ b/arch/ia64/include/asm/checksum.h
@@ -37,16 +37,6 @@ extern __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
  */
 extern __wsum csum_partial(const void *buff, int len, __wsum sum);
 
-/*
- * Same as csum_partial, but copies from src while it checksums.
- *
- * Here it is even more important to align src and dst on a 32-bit (or
- * even better 64-bit) boundary.
- */
-extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
-					   int len, __wsum sum,
-					   int *errp);
-
 extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
 					int len, __wsum sum);
 
diff --git a/arch/ia64/lib/csum_partial_copy.c b/arch/ia64/lib/csum_partial_copy.c
index bf9396b1ed32..5d147a33d648 100644
--- a/arch/ia64/lib/csum_partial_copy.c
+++ b/arch/ia64/lib/csum_partial_copy.c
@@ -103,39 +103,11 @@ out:
  * This is very ugly but temporary. THIS NEEDS SERIOUS ENHANCEMENTS.
  * But it's very tricky to get right even in C.
  */
-extern unsigned long do_csum(const unsigned char *, long);
-
-__wsum
-csum_partial_copy_from_user(const void __user *src, void *dst,
-			    int len, __wsum psum, int *errp)
-{
-	unsigned long result;
-
-	/* XXX Fixme
-	 * for now we separate the copy from checksum for obvious
-	 * alignment difficulties. Look at the Alpha code and you'll be
-	 * scared.
-	 */
-
-	if (__copy_from_user(dst, src, len) != 0 && errp)
-		*errp = -EFAULT;
-
-	result = do_csum(dst, len);
-
-	/* add in old sum, and carry.. */
-	result += (__force u32)psum;
-	/* 32+c bits -> 32 bits */
-	result = (result & 0xffffffff) + (result >> 32);
-	return (__force __wsum)result;
-}
-
-EXPORT_SYMBOL(csum_partial_copy_from_user);
-
 __wsum
 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
 {
-	return csum_partial_copy_from_user((__force const void __user *)src,
-					   dst, len, sum, NULL);
+	memcpy(dst, src, len);
+	return csum_partial(dst, len, sum);
 }
 EXPORT_SYMBOL(csum_partial_copy_nocheck);
diff --git a/arch/m68k/include/asm/checksum.h b/arch/m68k/include/asm/checksum.h
index f9b94e4b94f9..3f2c15d6f18c 100644
--- a/arch/m68k/include/asm/checksum.h
+++ b/arch/m68k/include/asm/checksum.h
@@ -30,7 +30,8 @@ __wsum csum_partial(const void *buff, int len, __wsum sum);
  * better 64-bit) boundary
  */
 
-extern __wsum csum_partial_copy_from_user(const void __user *src,
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+extern __wsum csum_and_copy_from_user(const void __user *src,
 						void *dst,
 						int len, __wsum sum,
 						int *csum_err);
diff --git a/arch/m68k/lib/checksum.c b/arch/m68k/lib/checksum.c
index 5fa3d392e181..31797be9a3dc 100644
--- a/arch/m68k/lib/checksum.c
+++ b/arch/m68k/lib/checksum.c
@@ -129,7 +129,7 @@ EXPORT_SYMBOL(csum_partial);
  */
 
 __wsum
-csum_partial_copy_from_user(const void __user *src, void *dst,
+csum_and_copy_from_user(const void __user *src, void *dst,
 			    int len, __wsum sum, int *csum_err)
 {
 	/*
@@ -316,7 +316,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
 
 	return(sum);
 }
-EXPORT_SYMBOL(csum_partial_copy_from_user);
+EXPORT_SYMBOL(csum_and_copy_from_user);
 
 
 /*
diff --git a/arch/nios2/include/asm/checksum.h b/arch/nios2/include/asm/checksum.h
index 703c5ee63421..ec39698d3bea 100644
--- a/arch/nios2/include/asm/checksum.h
+++ b/arch/nios2/include/asm/checksum.h
@@ -14,8 +14,6 @@
 extern __wsum csum_partial(const void *buff, int len, __wsum sum);
 extern __wsum csum_partial_copy(const void *src, void *dst, int len,
 				__wsum sum);
-extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
-					int len, __wsum sum, int *csum_err);
 
 #define csum_partial_copy_nocheck(src, dst, len, sum)	\
 	csum_partial_copy((src), (dst), (len), (sum))
diff --git a/arch/parisc/include/asm/checksum.h b/arch/parisc/include/asm/checksum.h
index c1c22819a04d..fe8c63b2d2c3 100644
--- a/arch/parisc/include/asm/checksum.h
+++ b/arch/parisc/include/asm/checksum.h
@@ -27,13 +27,6 @@ extern __wsum csum_partial(const void *, int, __wsum);
 extern __wsum csum_partial_copy_nocheck(const void *, void *, int, __wsum);
 
 /*
- * this is a new version of the above that records errors it finds in *errp,
- * but continues and zeros the rest of the buffer.
- */
-extern __wsum csum_partial_copy_from_user(const void __user *src,
-		void *dst, int len, __wsum sum, int *errp);
-
-/*
  * Optimized for IP headers, which always checksum on 4 octet boundaries.
  *
  * Written by Randolph Chung <tausq@debian.org>, and then mucked with by
diff --git a/arch/parisc/lib/checksum.c b/arch/parisc/lib/checksum.c
index 256322c7b648..c6f161583549 100644
--- a/arch/parisc/lib/checksum.c
+++ b/arch/parisc/lib/checksum.c
@@ -123,23 +123,3 @@ __wsum csum_partial_copy_nocheck(const void *src, void *dst,
 	return sum;
 }
 EXPORT_SYMBOL(csum_partial_copy_nocheck);
-
-/*
- * Copy from userspace and compute checksum.  If we catch an exception
- * then zero the rest of the buffer.
- */
-__wsum csum_partial_copy_from_user(const void __user *src,
-				   void *dst, int len,
-				   __wsum sum, int *err_ptr)
-{
-	int missing;
-
-	missing = copy_from_user(dst, src, len);
-	if (missing) {
-		memset(dst + len - missing, 0, missing);
-		*err_ptr = -EFAULT;
-	}
-
-	return csum_partial(dst, len, sum);
-}
-EXPORT_SYMBOL(csum_partial_copy_from_user);
diff --git a/arch/s390/include/asm/checksum.h b/arch/s390/include/asm/checksum.h
index 91e376b0d28c..6d01c96aeb5c 100644
--- a/arch/s390/include/asm/checksum.h
+++ b/arch/s390/include/asm/checksum.h
@@ -39,25 +39,6 @@ csum_partial(const void *buff, int len, __wsum sum)
 	return sum;
 }
 
-/*
- * the same as csum_partial_copy, but copies from user space.
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- *
- * Copy from userspace and compute checksum.
- */
-static inline __wsum
-csum_partial_copy_from_user(const void __user *src, void *dst,
-			    int len, __wsum sum,
-			    int *err_ptr)
-{
-	if (unlikely(copy_from_user(dst, src, len)))
-		*err_ptr = -EFAULT;
-	return csum_partial(dst, len, sum);
-}
-
-
 static inline __wsum
 csum_partial_copy_nocheck (const void *src, void *dst, int len, __wsum sum)
 {
diff --git a/arch/sh/include/asm/checksum_32.h b/arch/sh/include/asm/checksum_32.h
index 36b84cfd3f67..91571a42e44e 100644
--- a/arch/sh/include/asm/checksum_32.h
+++ b/arch/sh/include/asm/checksum_32.h
@@ -48,12 +48,17 @@ __wsum csum_partial_copy_nocheck(const void *src, void *dst,
 	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
 }
 
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
 static inline
-__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+__wsum csum_and_copy_from_user(const void __user *src, void *dst,
 				   int len, __wsum sum, int *err_ptr)
 {
-	return csum_partial_copy_generic((__force const void *)src, dst,
+	if (access_ok(src, len))
+		return csum_partial_copy_generic((__force const void *)src, dst,
 					len, sum, err_ptr, NULL);
+	if (len)
+		*err_ptr = -EFAULT;
+	return sum;
 }
 
 /*
diff --git a/arch/sparc/include/asm/checksum.h b/arch/sparc/include/asm/checksum.h
index c3be56e2e768..a6256cb6fc5c 100644
--- a/arch/sparc/include/asm/checksum.h
+++ b/arch/sparc/include/asm/checksum.h
@@ -1,6 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef ___ASM_SPARC_CHECKSUM_H
 #define ___ASM_SPARC_CHECKSUM_H
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
 #if defined(__sparc__) && defined(__arch64__)
 #include <asm/checksum_64.h>
 #else
diff --git a/arch/sparc/include/asm/checksum_32.h b/arch/sparc/include/asm/checksum_32.h
index 5fc98d80b03b..479a0b812af5 100644
--- a/arch/sparc/include/asm/checksum_32.h
+++ b/arch/sparc/include/asm/checksum_32.h
@@ -60,7 +60,7 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
 }
 
 static inline __wsum
-csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+csum_and_copy_from_user(const void __user *src, void *dst, int len,
 			    __wsum sum, int *err)
 {
 	register unsigned long ret asm("o0") = (unsigned long)src;
@@ -68,6 +68,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst, int len,
 	register int l asm("g1") = len;
 	register __wsum s asm("g7") = sum;
 
+	if (unlikely(!access_ok(src, len))) {
+		if (len)
+			*err = -EFAULT;
+		return sum;
+	}
+
 	__asm__ __volatile__ (
 		".section __ex_table,#alloc\n\t"
 		".align 4\n\t"
@@ -83,8 +89,10 @@ csum_partial_copy_from_user(const void __user *src, void *dst, int len,
 	return (__force __wsum)ret;
 }
 
+#define HAVE_CSUM_COPY_USER
+
 static inline __wsum
-csum_partial_copy_to_user(const void *src, void __user *dst, int len,
+csum_and_copy_to_user(const void *src, void __user *dst, int len,
 			  __wsum sum, int *err)
 {
 	if (!access_ok(dst, len)) {
@@ -113,9 +121,6 @@ csum_partial_copy_to_user(const void *src, void __user *dst, int len,
 	}
 }
 
-#define HAVE_CSUM_COPY_USER
-#define csum_and_copy_to_user csum_partial_copy_to_user
-
 /* ihl is always 5 or greater, almost always is 5, and iph is word aligned
  * the majority of the time.
 */
diff --git a/arch/sparc/include/asm/checksum_64.h b/arch/sparc/include/asm/checksum_64.h
index e52450930e4e..0fa4433f5662 100644
--- a/arch/sparc/include/asm/checksum_64.h
+++ b/arch/sparc/include/asm/checksum_64.h
@@ -46,7 +46,7 @@ long __csum_partial_copy_from_user(const void __user *src,
 				   __wsum sum);
 
 static inline __wsum
-csum_partial_copy_from_user(const void __user *src,
+csum_and_copy_from_user(const void __user *src,
 			    void *dst, int len,
 			    __wsum sum, int *err)
 {
diff --git a/arch/x86/include/asm/checksum.h b/arch/x86/include/asm/checksum.h
index d79d1e622dcf..0ada98d5d09f 100644
--- a/arch/x86/include/asm/checksum.h
+++ b/arch/x86/include/asm/checksum.h
@@ -1,4 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1
+#define HAVE_CSUM_COPY_USER
 #ifdef CONFIG_X86_32
 # include <asm/checksum_32.h>
 #else
diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
index f57b94e02c57..11624c8a9d8d 100644
--- a/arch/x86/include/asm/checksum_32.h
+++ b/arch/x86/include/asm/checksum_32.h
@@ -44,18 +44,21 @@ static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst,
 	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
 }
 
-static inline __wsum csum_partial_copy_from_user(const void __user *src,
-						 void *dst,
-						 int len, __wsum sum,
-						 int *err_ptr)
+static inline __wsum csum_and_copy_from_user(const void __user *src,
+					     void *dst, int len,
+					     __wsum sum, int *err_ptr)
 {
 	__wsum ret;
 
 	might_sleep();
-	stac();
+	if (!user_access_begin(src, len)) {
+		if (len)
+			*err_ptr = -EFAULT;
+		return sum;
+	}
 	ret = csum_partial_copy_generic((__force void *)src, dst,
 					len, sum, err_ptr, NULL);
-	clac();
+	user_access_end();
 
 	return ret;
 }
@@ -173,7 +176,6 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
 /*
  *	Copy and checksum to user
  */
-#define HAVE_CSUM_COPY_USER
 static inline __wsum csum_and_copy_to_user(const void *src,
 					   void __user *dst,
 					   int len, __wsum sum,
@@ -182,11 +184,10 @@ static inline __wsum csum_and_copy_to_user(const void *src,
 	__wsum ret;
 
 	might_sleep();
-	if (access_ok(dst, len)) {
-		stac();
+	if (user_access_begin(dst, len)) {
 		ret = csum_partial_copy_generic(src, (__force void *)dst,
 						len, sum, NULL, err_ptr);
-		clac();
+		user_access_end();
 		return ret;
 	}
 
diff --git a/arch/x86/include/asm/checksum_64.h b/arch/x86/include/asm/checksum_64.h
index 3ec6d3267cf9..0a289b87e872 100644
--- a/arch/x86/include/asm/checksum_64.h
+++ b/arch/x86/include/asm/checksum_64.h
@@ -129,27 +129,19 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
  */
 extern __wsum csum_partial(const void *buff, int len, __wsum sum);
 
-#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER 1
-#define HAVE_CSUM_COPY_USER 1
-
-
 /* Do not call this directly. Use the wrappers below */
 extern __visible __wsum csum_partial_copy_generic(const void *src, const void *dst,
 					int len, __wsum sum,
 					int *src_err_ptr, int *dst_err_ptr);
 
-extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
 					  int len, __wsum isum, int *errp);
-extern __wsum csum_partial_copy_to_user(const void *src, void __user *dst,
+extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
 					int len, __wsum isum, int *errp);
 extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
 					int len, __wsum sum);
 
-/* Old names. To be removed. */
-#define csum_and_copy_to_user csum_partial_copy_to_user
-#define csum_and_copy_from_user csum_partial_copy_from_user
-
 /**
  * ip_compute_csum - Compute an 16bit IP checksum.
  * @buff: buffer address.
diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
index c66c8b00f236..ee63d7576fd2 100644
--- a/arch/x86/lib/csum-wrappers_64.c
+++ b/arch/x86/lib/csum-wrappers_64.c
@@ -10,7 +10,7 @@
 #include <asm/smap.h>
 
 /**
- * csum_partial_copy_from_user - Copy and checksum from user space.
+ * csum_and_copy_from_user - Copy and checksum from user space.
  * @src: source address (user space)
  * @dst: destination address
  * @len: number of bytes to be copied.
@@ -21,13 +21,13 @@
  * src and dst are best aligned to 64bits.
  */
 __wsum
-csum_partial_copy_from_user(const void __user *src, void *dst,
+csum_and_copy_from_user(const void __user *src, void *dst,
 			    int len, __wsum isum, int *errp)
 {
 	might_sleep();
 	*errp = 0;
 
-	if (!likely(access_ok(src, len)))
+	if (!user_access_begin(src, len))
 		goto out_err;
 
 	/*
@@ -42,8 +42,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
 		while (((unsigned long)src & 6) && len >= 2) {
 			__u16 val16;
 
-			if (__get_user(val16, (const __u16 __user *)src))
-				goto out_err;
+			unsafe_get_user(val16, (const __u16 __user *)src, out);
 
 			*(__u16 *)dst = val16;
 			isum = (__force __wsum)add32_with_carry(
@@ -53,25 +52,26 @@
 			len -= 2;
 		}
 	}
-	stac();
 	isum = csum_partial_copy_generic((__force const void *)src,
 				dst, len, isum, errp, NULL);
-	clac();
+	user_access_end();
 	if (unlikely(*errp))
 		goto out_err;
 
 	return isum;
 
+out:
+	user_access_end();
 out_err:
 	*errp = -EFAULT;
 	memset(dst, 0, len);
 
 	return isum;
 }
-EXPORT_SYMBOL(csum_partial_copy_from_user);
+EXPORT_SYMBOL(csum_and_copy_from_user);
 
 /**
- * csum_partial_copy_to_user - Copy and checksum to user space.
+ * csum_and_copy_to_user - Copy and checksum to user space.
  * @src: source address
  * @dst: destination address (user space)
  * @len: number of bytes to be copied.
@@ -82,14 +82,14 @@ EXPORT_SYMBOL(csum_partial_copy_from_user);
 * src and dst are best aligned to 64bits.
 */
 __wsum
-csum_partial_copy_to_user(const void *src, void __user *dst,
+csum_and_copy_to_user(const void *src, void __user *dst,
 			  int len, __wsum isum, int *errp)
 {
 	__wsum ret;
 
 	might_sleep();
 
-	if (unlikely(!access_ok(dst, len))) {
+	if (!user_access_begin(dst, len)) {
 		*errp = -EFAULT;
 		return 0;
 	}
@@ -100,9 +100,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
 
 			isum = (__force __wsum)add32_with_carry(
 					(__force unsigned)isum, val16);
-			*errp = __put_user(val16, (__u16 __user *)dst);
-			if (*errp)
-				return isum;
+			unsafe_put_user(val16, (__u16 __user *)dst, out);
 			src += 2;
 			dst += 2;
 			len -= 2;
@@ -110,13 +108,16 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
 		}
 	}
 	*errp = 0;
-	stac();
 	ret = csum_partial_copy_generic(src, (void __force *)dst,
 					len, isum, NULL, errp);
-	clac();
+	user_access_end();
 	return ret;
+out:
+	user_access_end();
+	*errp = -EFAULT;
+	return isum;
 }
-EXPORT_SYMBOL(csum_partial_copy_to_user);
+EXPORT_SYMBOL(csum_and_copy_to_user);
 
 /**
  * csum_partial_copy_nocheck - Copy and checksum.
diff --git a/arch/x86/um/asm/checksum.h b/arch/x86/um/asm/checksum.h
index 2a56cac64687..ff6bba2c8ab6 100644
--- a/arch/x86/um/asm/checksum.h
+++ b/arch/x86/um/asm/checksum.h
@@ -36,26 +36,6 @@ __wsum csum_partial_copy_nocheck(const void *src, void *dst,
 	return csum_partial(dst, len, sum);
 }
 
-/*
- * the same as csum_partial, but copies from src while it
- * checksums, and handles user-space pointer exceptions correctly, when needed.
- *
- * here even more important to align src and dst on a 32-bit (or even
- * better 64-bit) boundary
- */
-
-static __inline__
-__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
-				   int len, __wsum sum, int *err_ptr)
-{
-	if (copy_from_user(dst, src, len)) {
-		*err_ptr = -EFAULT;
-		return (__force __wsum)-1;
-	}
-
-	return csum_partial(dst, len, sum);
-}
-
 /**
  * csum_fold - Fold and invert a 32bit checksum.
  * sum: 32bit unfolded sum
diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h
index 8b687176ad72..d8292cc9ebdf 100644
--- a/arch/xtensa/include/asm/checksum.h
+++ b/arch/xtensa/include/asm/checksum.h
@@ -44,8 +44,6 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
 /*
  * Note: when you get a NULL pointer exception here this means someone
  * passed in an incorrect kernel address to one of these functions.
- *
- * If you use these functions directly please don't forget the access_ok().
 */
 
 static inline
@@ -54,12 +52,17 @@ __wsum csum_partial_copy_nocheck(const void *src, void *dst,
 	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
 }
 
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
 static inline
-__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+__wsum csum_and_copy_from_user(const void __user *src, void *dst,
 				   int len, __wsum sum, int *err_ptr)
 {
-	return csum_partial_copy_generic((__force const void *)src, dst,
+	if (access_ok(dst, len))
+		return csum_partial_copy_generic((__force const void *)src, dst,
 					len, sum, err_ptr, NULL);
+	if (len)
+		*err_ptr = -EFAULT;
+	return sum;
 }
 
 /*
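The architectures above that simply delete their csum_partial_copy_from_user() without defining _HAVE_ARCH_COPY_AND_CSUM_FROM_USER (c6x, nios2, parisc, s390 and the UML x86 header) now pick up the default from include/net/checksum.h, per the "take the dummy csum_and_copy_from_user() into net/checksum.h" and "default csum_and_copy_to_user(): don't bother with access_ok()" entries in the shortlog. The sketch below is an approximation of how those fallbacks behave, not a verbatim copy of that header, and the generic_* names are placeholders: copy first, checksum the kernel-side buffer, and let copy_from_user()/copy_to_user() do the access checking instead of a separate access_ok().

```c
/* Approximate shape of the generic fallbacks (assumed, not verbatim). */
static inline __wsum
generic_csum_and_copy_from_user(const void __user *src, void *dst,
				int len, __wsum sum, int *err_ptr)
{
	/* copy_from_user() validates the range and zero-fills on fault. */
	if (copy_from_user(dst, src, len))
		*err_ptr = -EFAULT;
	return csum_partial(dst, len, sum);
}

static inline __wsum
generic_csum_and_copy_to_user(const void *src, void __user *dst,
			      int len, __wsum sum, int *err_ptr)
{
	sum = csum_partial(src, len, sum);
	/* No separate access_ok(): copy_to_user() already validates dst. */
	if (copy_to_user(dst, src, len)) {
		*err_ptr = -EFAULT;
		return (__force __wsum)-1;	/* invalid checksum */
	}
	return sum;
}
```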