author | Linus Torvalds <torvalds@linux-foundation.org> | 2017-07-07 20:39:20 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2017-07-07 20:39:20 -0700
commit | 6a37e94009b1a76d415b2759755f5cc7854c4ff6 (patch)
tree | 0c070f5bb1ab399460554a27fc6cc91c02e50e64 /lib
parent | da029c11e6b12f321f36dac8771e833b65cec962 (diff)
parent | 09fc68dc66f7597bdc8898c991609a48f061bed5 (diff)
Merge branch 'uaccess-work.iov_iter' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull iov_iter hardening from Al Viro:
"This is the iov_iter/uaccess/hardening pile.
For one thing, it trims the inline part of copy_to_user/copy_from_user
to the minimum that *does* need to be inlined - object size checks,
basically. For another, it sanitizes the checks for iov_iter
primitives. There are 4 groups of checks: access_ok(), might_fault(),
object size and KASAN.
- the access_ok() checks had been done by whoever set the iov_iter up.
However, that has happened in a function far away, so proving that
there's no path to actual copying bypassing those checks is hard
and proving that iov_iter has not been buggered in the meanwhile is
also not pleasant. So we want those redone in actual
copyin/copyout.
- might_fault() is better off consolidated - we know whether it needs
to be checked as soon as we enter iov_iter primitive and observe
the iov_iter flavour. No need to wait until the copyin/copyout. The
call chains are short enough to make sure we won't miss anything -
in fact, it's more robust that way, since there are cases where we
do e.g. forced fault-in before getting to copyin/copyout. It's not
quite what we need to check (in particular, combination of
iovec-backed and set_fs(KERNEL_DS) is almost certainly a bug, not a
cause to skip checks), but that's for later series. For now let's
keep might_fault() (the resulting shape of a primitive is sketched
just after this list).
- KASAN checks belong in copyin/copyout - at the same level where
other iov_iter flavours would've hit them in memcpy().
- object size checks should apply to *all* iov_iter flavours, not
just iovec-backed ones.
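Concretely, each primitive now does the consolidated might_fault() once on
entry, with access_ok() and KASAN folded into copyin()/copyout(); this is the
post-patch shape of _copy_to_iter(), lifted from the lib/iov_iter.c diff
further down (comments here are editorial):

```c
size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        const char *from = addr;
        if (unlikely(i->type & ITER_PIPE))
                return copy_pipe_to_iter(addr, bytes, i);
        /* consolidated check: only iovec-backed iterators reach userland */
        if (iter_is_iovec(i))
                might_fault();
        iterate_and_advance(i, bytes, v,
                copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
                memcpy_to_page(v.bv_page, v.bv_offset,
                               (from += v.bv_len) - v.bv_len, v.bv_len),
                memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
        )
        return bytes;
}
```

The iovec branch goes through copyout() (access_ok() plus KASAN), while the
bvec and kvec branches hit plain memcpy(), where KASAN already applies.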
There are two groups of primitives - one gets the kernel object
described as pointer + size (copy_to_iter(), etc.) while another gets
it as page + offset + size (copy_page_to_iter(), etc.).
For the first group the checks are best done where we actually have a
chance to find the object size. In other words, those belong in inline
wrappers in uio.h, before calling into iov_iter.c - the same kind as
we have for the inlined part of copy_to_user().
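For illustration, the uio.h side ends up looking roughly like this; the uio.h
changes are outside the lib/-only diff shown below, so take the helper name
check_copy_size() and the exact layout as assumptions based on the series
description:

```c
/* sketch only: not part of the lib/ diff below; check_copy_size()
 * stands for the consolidated object size check of this series */
static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        /* done inline, while the compiler can still see the object */
        if (unlikely(!check_copy_size(addr, bytes, true)))
                return 0;
        return _copy_to_iter(addr, bytes, i);   /* out-of-line worker */
}
```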
For the second group there is no object to look at - offset in page is
just a number, it bears no type information. So we do them in the
common helper called by iov_iter.c primitives of that kind. All it
currently does is check that we are not trying to access outside of
the compound page; eventually we might want to add some sanity checks
on the page involved.
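That common helper is page_copy_sane(), added to lib/iov_iter.c (it also
appears verbatim in the diff below; the comments here are editorial):

```c
static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
        size_t v = n + offset;  /* may wrap around on overflow */
        /* reject wraparound and ranges past the end of the compound page */
        if (likely(n <= v && v <= (PAGE_SIZE << compound_order(page))))
                return true;
        WARN_ON(1);
        return false;
}
```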
So the things we need in the copyin/copyout part of iov_iter.c do not
quite match anything in uaccess.h (we want no zeroing, we *do* want
access_ok() and KASAN and we want no might_fault() or object size
checks done on that level). OTOH, these needs are simple enough to
provide a couple of helpers (static in iov_iter.c) doing just what we
need..."
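Those helpers are copyin() and copyout(), static in lib/iov_iter.c and also
visible in the diff below (comments here are editorial):

```c
static int copyout(void __user *to, const void *from, size_t n)
{
        if (access_ok(VERIFY_WRITE, to, n)) {
                kasan_check_read(from, n);      /* KASAN: source is readable */
                n = raw_copy_to_user(to, from, n);
        }
        return n;       /* bytes *not* copied; no zeroing of the tail */
}

static int copyin(void *to, const void __user *from, size_t n)
{
        if (access_ok(VERIFY_READ, from, n)) {
                kasan_check_write(to, n);       /* KASAN: destination is writable */
                n = raw_copy_from_user(to, from, n);
        }
        return n;
}
```

If access_ok() fails, the whole length is reported as uncopied; unlike
copy_from_user(), no zeroing is done, since the iov_iter callers handle short
copies themselves.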
* 'uaccess-work.iov_iter' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
iov_iter: saner checks on copyin/copyout
iov_iter: sanity checks for copy to/from page primitives
iov_iter/hardening: move object size checks to inlined part
copy_{to,from}_user(): consolidate object size checks
copy_{from,to}_user(): move kasan checks and might_fault() out-of-line
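The last two commits reshape the generic copy_{to,from}_user() the same way:
the object size check stays inline, while access_ok(), might_fault() and the
KASAN checks move out of line into lib/usercopy.c (the out-of-line half is in
the diff below). A rough sketch of the resulting inline wrapper; as with the
uio.h sketch above, check_copy_size() and the exact layout are assumptions,
since include/linux/uaccess.h is outside this lib/-only diff:

```c
/* sketch only: the uaccess.h side is not shown in the diff below */
static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
        if (likely(check_copy_size(to, n, false)))
                n = _copy_from_user(to, from, n);       /* out-of-line part */
        return n;
}
```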
Diffstat (limited to 'lib')
-rw-r--r-- | lib/iov_iter.c | 98
-rw-r--r-- | lib/usercopy.c | 10
2 files changed, 77 insertions, 31 deletions
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index c9a69064462f..52c8dd6d8e82 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -130,6 +130,24 @@
         }                       \
 }
 
+static int copyout(void __user *to, const void *from, size_t n)
+{
+        if (access_ok(VERIFY_WRITE, to, n)) {
+                kasan_check_read(from, n);
+                n = raw_copy_to_user(to, from, n);
+        }
+        return n;
+}
+
+static int copyin(void *to, const void __user *from, size_t n)
+{
+        if (access_ok(VERIFY_READ, from, n)) {
+                kasan_check_write(to, n);
+                n = raw_copy_from_user(to, from, n);
+        }
+        return n;
+}
+
 static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
                          struct iov_iter *i)
 {
@@ -144,6 +162,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
         if (unlikely(!bytes))
                 return 0;
 
+        might_fault();
         wanted = bytes;
         iov = i->iov;
         skip = i->iov_offset;
@@ -155,7 +174,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
                 from = kaddr + offset;
 
                 /* first chunk, usually the only one */
-                left = __copy_to_user_inatomic(buf, from, copy);
+                left = copyout(buf, from, copy);
                 copy -= left;
                 skip += copy;
                 from += copy;
@@ -165,7 +184,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
                         iov++;
                         buf = iov->iov_base;
                         copy = min(bytes, iov->iov_len);
-                        left = __copy_to_user_inatomic(buf, from, copy);
+                        left = copyout(buf, from, copy);
                         copy -= left;
                         skip = copy;
                         from += copy;
@@ -184,7 +203,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
         kaddr = kmap(page);
         from = kaddr + offset;
-        left = __copy_to_user(buf, from, copy);
+        left = copyout(buf, from, copy);
         copy -= left;
         skip += copy;
         from += copy;
@@ -193,7 +212,7 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
                 iov++;
                 buf = iov->iov_base;
                 copy = min(bytes, iov->iov_len);
-                left = __copy_to_user(buf, from, copy);
+                left = copyout(buf, from, copy);
                 copy -= left;
                 skip = copy;
                 from += copy;
@@ -227,6 +246,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
         if (unlikely(!bytes))
                 return 0;
 
+        might_fault();
         wanted = bytes;
         iov = i->iov;
         skip = i->iov_offset;
@@ -238,7 +258,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
                 to = kaddr + offset;
 
                 /* first chunk, usually the only one */
-                left = __copy_from_user_inatomic(to, buf, copy);
+                left = copyin(to, buf, copy);
                 copy -= left;
                 skip += copy;
                 to += copy;
@@ -248,7 +268,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
                         iov++;
                         buf = iov->iov_base;
                         copy = min(bytes, iov->iov_len);
-                        left = __copy_from_user_inatomic(to, buf, copy);
+                        left = copyin(to, buf, copy);
                         copy -= left;
                         skip = copy;
                         to += copy;
@@ -267,7 +287,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
         kaddr = kmap(page);
         to = kaddr + offset;
-        left = __copy_from_user(to, buf, copy);
+        left = copyin(to, buf, copy);
         copy -= left;
         skip += copy;
         to += copy;
@@ -276,7 +296,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
                 iov++;
                 buf = iov->iov_base;
                 copy = min(bytes, iov->iov_len);
-                left = __copy_from_user(to, buf, copy);
+                left = copyin(to, buf, copy);
                 copy -= left;
                 skip = copy;
                 to += copy;
@@ -535,14 +555,15 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
         return bytes;
 }
 
-size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 {
         const char *from = addr;
         if (unlikely(i->type & ITER_PIPE))
                 return copy_pipe_to_iter(addr, bytes, i);
+        if (iter_is_iovec(i))
+                might_fault();
         iterate_and_advance(i, bytes, v,
-                __copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
-                               v.iov_len),
+                copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
                 memcpy_to_page(v.bv_page, v.bv_offset,
                                (from += v.bv_len) - v.bv_len, v.bv_len),
                 memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
@@ -550,18 +571,19 @@ size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
         return bytes;
 }
-EXPORT_SYMBOL(copy_to_iter);
+EXPORT_SYMBOL(_copy_to_iter);
 
-size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 {
         char *to = addr;
         if (unlikely(i->type & ITER_PIPE)) {
                 WARN_ON(1);
                 return 0;
         }
+        if (iter_is_iovec(i))
+                might_fault();
         iterate_and_advance(i, bytes, v,
-                __copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
-                                 v.iov_len),
+                copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
                 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                  v.bv_offset, v.bv_len),
                 memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
@@ -569,9 +591,9 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
         return bytes;
 }
-EXPORT_SYMBOL(copy_from_iter);
+EXPORT_SYMBOL(_copy_from_iter);
 
-bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
+bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
 {
         char *to = addr;
         if (unlikely(i->type & ITER_PIPE)) {
@@ -581,8 +603,10 @@ bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
         if (unlikely(i->count < bytes))
                 return false;
 
+        if (iter_is_iovec(i))
+                might_fault();
         iterate_all_kinds(i, bytes, v, ({
-                if (__copy_from_user((to += v.iov_len) - v.iov_len,
+                if (copyin((to += v.iov_len) - v.iov_len,
                                       v.iov_base, v.iov_len))
                         return false;
                 0;}),
@@ -594,9 +618,9 @@ bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
         iov_iter_advance(i, bytes);
         return true;
 }
-EXPORT_SYMBOL(copy_from_iter_full);
+EXPORT_SYMBOL(_copy_from_iter_full);
 
-size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
         char *to = addr;
         if (unlikely(i->type & ITER_PIPE)) {
@@ -613,10 +637,10 @@ size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
         return bytes;
 }
-EXPORT_SYMBOL(copy_from_iter_nocache);
+EXPORT_SYMBOL(_copy_from_iter_nocache);
 
 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
-size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
+size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 {
         char *to = addr;
         if (unlikely(i->type & ITER_PIPE)) {
@@ -634,10 +658,10 @@ size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
         return bytes;
 }
-EXPORT_SYMBOL_GPL(copy_from_iter_flushcache);
+EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
 #endif
 
-bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
+bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
         char *to = addr;
         if (unlikely(i->type & ITER_PIPE)) {
@@ -659,11 +683,22 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
         iov_iter_advance(i, bytes);
         return true;
 }
-EXPORT_SYMBOL(copy_from_iter_full_nocache);
+EXPORT_SYMBOL(_copy_from_iter_full_nocache);
+
+static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
+{
+        size_t v = n + offset;
+        if (likely(n <= v && v <= (PAGE_SIZE << compound_order(page))))
+                return true;
+        WARN_ON(1);
+        return false;
+}
 
 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                          struct iov_iter *i)
 {
+        if (unlikely(!page_copy_sane(page, offset, bytes)))
+                return 0;
         if (i->type & (ITER_BVEC|ITER_KVEC)) {
                 void *kaddr = kmap_atomic(page);
                 size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
@@ -679,13 +714,15 @@ EXPORT_SYMBOL(copy_page_to_iter);
 
 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                          struct iov_iter *i)
 {
+        if (unlikely(!page_copy_sane(page, offset, bytes)))
+                return 0;
         if (unlikely(i->type & ITER_PIPE)) {
                 WARN_ON(1);
                 return 0;
         }
         if (i->type & (ITER_BVEC|ITER_KVEC)) {
                 void *kaddr = kmap_atomic(page);
-                size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
+                size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
                 kunmap_atomic(kaddr);
                 return wanted;
         } else
@@ -722,7 +759,7 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
         if (unlikely(i->type & ITER_PIPE))
                 return pipe_zero(bytes, i);
         iterate_and_advance(i, bytes, v,
-                __clear_user(v.iov_base, v.iov_len),
+                clear_user(v.iov_base, v.iov_len),
                 memzero_page(v.bv_page, v.bv_offset, v.bv_len),
                 memset(v.iov_base, 0, v.iov_len)
         )
@@ -735,14 +772,17 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
                 struct iov_iter *i, unsigned long offset, size_t bytes)
 {
         char *kaddr = kmap_atomic(page), *p = kaddr + offset;
+        if (unlikely(!page_copy_sane(page, offset, bytes))) {
+                kunmap_atomic(kaddr);
+                return 0;
+        }
         if (unlikely(i->type & ITER_PIPE)) {
                 kunmap_atomic(kaddr);
                 WARN_ON(1);
                 return 0;
         }
         iterate_all_kinds(i, bytes, v,
-                __copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
-                                          v.iov_base, v.iov_len),
+                copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
                 memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
                                  v.bv_offset, v.bv_len),
                 memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
diff --git a/lib/usercopy.c b/lib/usercopy.c
index 1b6010a3beb8..f5d9f08ee032 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -6,8 +6,11 @@
 unsigned long _copy_from_user(void *to, const void __user *from, unsigned long n)
 {
         unsigned long res = n;
-        if (likely(access_ok(VERIFY_READ, from, n)))
+        might_fault();
+        if (likely(access_ok(VERIFY_READ, from, n))) {
+                kasan_check_write(to, n);
                 res = raw_copy_from_user(to, from, n);
+        }
         if (unlikely(res))
                 memset(to + (n - res), 0, res);
         return res;
@@ -18,8 +21,11 @@ EXPORT_SYMBOL(_copy_from_user);
 #ifndef INLINE_COPY_TO_USER
 unsigned long _copy_to_user(void *to, const void __user *from, unsigned long n)
 {
-        if (likely(access_ok(VERIFY_WRITE, to, n)))
+        might_fault();
+        if (likely(access_ok(VERIFY_WRITE, to, n))) {
+                kasan_check_read(from, n);
                 n = raw_copy_to_user(to, from, n);
+        }
         return n;
 }
 EXPORT_SYMBOL(_copy_to_user);