| | | |
|---|---|---|
| author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2015-09-29 16:28:52 -0700 |
| committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2015-09-29 16:28:52 -0700 |
| commit | 8f697e574012cc73b6b0dcbf30d88a3a0f43b78f (patch) | |
| tree | a6b16bcf33ad08e03837b61eed2fffb31118f1f3 /mm/filemap.c | |
| parent | 221bcb24c6530be17468fdcdbf91299aba32a693 (diff) | |
| parent | 9ffecb10283508260936b96022d4ee43a7798b4c (diff) | |
Merge tag 'v4.3-rc3' into next
Merge with Linux 4.3-rc3 to bring in the MFD DA9062 changes needed to merge
the DA9062 OnKey driver.
Diffstat (limited to 'mm/filemap.c')
-rw-r--r-- | mm/filemap.c | 36
1 file changed, 19 insertions(+), 17 deletions(-)
```diff
diff --git a/mm/filemap.c b/mm/filemap.c
index 1283fc825458..72940fb38666 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -674,7 +674,7 @@ struct page *__page_cache_alloc(gfp_t gfp)
 		do {
 			cpuset_mems_cookie = read_mems_allowed_begin();
 			n = cpuset_mem_spread_node();
-			page = alloc_pages_exact_node(n, gfp, 0);
+			page = __alloc_pages_node(n, gfp, 0);
 		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 
 		return page;
@@ -2473,21 +2473,6 @@ ssize_t generic_perform_write(struct file *file,
 						iov_iter_count(i));
 
 again:
-		/*
-		 * Bring in the user page that we will copy from _first_.
-		 * Otherwise there's a nasty deadlock on copying from the
-		 * same page as we're writing to, without it being marked
-		 * up-to-date.
-		 *
-		 * Not only is this an optimisation, but it is also required
-		 * to check that the address is actually valid, when atomic
-		 * usercopies are used, below.
-		 */
-		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
-			status = -EFAULT;
-			break;
-		}
-
 		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
 						&page, &fsdata);
 		if (unlikely(status < 0))
@@ -2495,8 +2480,17 @@ again:
 
 		if (mapping_writably_mapped(mapping))
 			flush_dcache_page(page);
-
+		/*
+		 * 'page' is now locked.  If we are trying to copy from a
+		 * mapping of 'page' in userspace, the copy might fault and
+		 * would need PageUptodate() to complete.  But, page can not be
+		 * made Uptodate without acquiring the page lock, which we hold.
+		 * Deadlock.  Avoid with pagefault_disable().  Fix up below with
+		 * iov_iter_fault_in_readable().
+		 */
+		pagefault_disable();
 		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
+		pagefault_enable();
 		flush_dcache_page(page);
 
 		status = a_ops->write_end(file, mapping, pos, bytes, copied,
@@ -2519,6 +2513,14 @@ again:
 			 */
 			bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
 						iov_iter_single_seg_count(i));
+			/*
+			 * This is the fallback to recover if the copy from
+			 * userspace above faults.
+			 */
+			if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
+				status = -EFAULT;
+				break;
+			}
 			goto again;
 		}
 		pos += copied;
```
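For readers following the generic_perform_write() change above: after this patch the loop no longer prefaults the user buffer before taking the page lock. Instead it copies with page faults disabled and, only if the atomic copy makes no progress, faults the source pages in (with the page lock dropped) and retries. The sketch below is a condensed illustration of that control flow, not the literal kernel source: locals, write_begin()/write_end() arguments, and most error handling are abbreviated, but the helpers it calls (pagefault_disable()/pagefault_enable(), iov_iter_copy_from_user_atomic(), iov_iter_fault_in_readable(), iov_iter_advance()) are the ones used in the patch.

```c
/*
 * Condensed sketch of the post-patch copy loop in generic_perform_write().
 * 'i' is the iov_iter describing the user buffer being written; 'pos',
 * 'written', 'status', 'page', 'fsdata', 'flags', 'mapping' and 'a_ops'
 * are the surrounding locals from the real function.
 */
do {
	unsigned long offset = pos & (PAGE_CACHE_SIZE - 1);
	unsigned long bytes = min_t(unsigned long,
				    PAGE_CACHE_SIZE - offset,
				    iov_iter_count(i));
	size_t copied;

again:
	/* Lock (and, if needed, allocate) the pagecache page. */
	status = a_ops->write_begin(file, mapping, pos, bytes, flags,
				    &page, &fsdata);
	if (unlikely(status < 0))
		break;

	/*
	 * Copy with page faults disabled: if the source is a userspace
	 * mapping of 'page' itself, a fault here would need the page lock
	 * we are holding (deadlock), so the copy falls short instead of
	 * faulting.
	 */
	pagefault_disable();
	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
	pagefault_enable();

	/* Unlocks 'page' and marks the written range dirty/uptodate. */
	status = a_ops->write_end(file, mapping, pos, bytes, copied,
				  page, fsdata);
	if (unlikely(status < 0))
		break;

	iov_iter_advance(i, copied);
	if (unlikely(copied == 0)) {
		/*
		 * The atomic copy made no progress.  The page lock is no
		 * longer held, so fault the source in now and retry.
		 */
		bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
			      iov_iter_single_seg_count(i));
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}
		goto again;
	}

	pos += copied;
	written += copied;
} while (iov_iter_count(i));
```

The design point the patch makes is that the common case (source buffer already resident) no longer pays for an up-front fault-in on every iteration; the fault-in now runs only on the rare retry path after a short atomic copy.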