author    Eric Dumazet <edumazet@google.com>    2019-02-26 10:42:39 -0800
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2019-05-22 07:37:45 +0200
commit    627bb2d93b4d48f89e2087b3149c0a8444e5a1d4 (patch)
tree      9b6acc584f8b33f2915d7b9c61edf279985b1905 /lib
parent    866f011181ffb9f4da5044dda316bbac26c78819 (diff)
iov_iter: optimize page_copy_sane()
commit 6daef95b8c914866a46247232a048447fff97279 upstream.

Avoid cache line miss dereferencing struct page if we can.

page_copy_sane() mostly deals with order-0 pages.

Extra cache line miss is visible on TCP recvmsg() calls dealing with
GRO packets (typically 45 page frags are attached to one skb).

Bringing the 45 struct pages into cpu cache while copying the data is
not free, since the freeing of the skb (and associated page frags
put_page()) can happen after cache lines have been evicted.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
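To make the fast path concrete, here is a minimal userspace sketch of the
check the patch adds; page_copy_sane_fast() and the 4 KiB PAGE_SIZE are
assumptions for illustration, not kernel API. The n <= v comparison rejects
size_t overflow in n + offset, and v <= PAGE_SIZE guarantees the copy fits
entirely within an order-0 page, so compound_head() never has to be
dereferenced (no struct page cache line is touched).

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed order-0 page size (x86-64) */

/*
 * Userspace model of the fast path: if offset + n fits in a single
 * order-0 page, the copy is sane and no struct page access is needed.
 */
static bool page_copy_sane_fast(size_t offset, size_t n)
{
	size_t v = n + offset;

	/* n <= v rejects size_t overflow of n + offset. */
	return n <= v && v <= PAGE_SIZE;
}

int main(void)
{
	printf("%d\n", page_copy_sane_fast(0, 4096));        /* 1: exactly one page */
	printf("%d\n", page_copy_sane_fast(4090, 100));      /* 0: crosses the page end */
	printf("%d\n", page_copy_sane_fast(16, (size_t)-8)); /* 0: n + offset overflows */
	return 0;
}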
Diffstat (limited to 'lib')
-rw-r--r--	lib/iov_iter.c	17
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 8be175df3075..acd7b97c16f2 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -817,8 +817,21 @@ EXPORT_SYMBOL(_copy_from_iter_full_nocache);
static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
- struct page *head = compound_head(page);
- size_t v = n + offset + page_address(page) - page_address(head);
+ struct page *head;
+ size_t v = n + offset;
+
+ /*
+ * The general case needs to access the page order in order
+ * to compute the page size.
+ * However, we mostly deal with order-0 pages and thus can
+ * avoid a possible cache line miss for requests that fit all
+ * page orders.
+ */
+ if (n <= v && v <= PAGE_SIZE)
+ return true;
+
+ head = compound_head(page);
+ v += (page - head) << PAGE_SHIFT;
if (likely(n <= v && v <= (PAGE_SIZE << compound_order(head))))
return true;
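On the slow path, pointer subtraction on struct page (page - head) yields
the page's index within its compound page, so (page - head) << PAGE_SHIFT
converts that index into a byte offset before the copy is bounded against
the full compound page size, PAGE_SIZE << compound_order(head). As a worked
example (assuming 4 KiB pages): for the third page of an order-2 compound
page, page - head == 2, giving a base offset of 8192 bytes; a copy of
n = 6000 bytes at offset = 100 falls through the fast path (6100 > PAGE_SIZE)
but passes the general check, since v = 6100 + 8192 = 14292 <= 16384.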