author Eric Dumazet <edumazet@google.com> 2012-04-22 12:26:16 +0000
committer David S. Miller <davem@davemloft.net> 2012-04-23 23:01:35 -0400
commit 41c73a0d44c902e92397552acce181295eaa448b (patch)
tree 522ac16cf3c1db59ef21e30cf14177dea1a7ea24 /net/core
parent 1402d366019fedaa2b024f2bac06b7cc9a8782e1 (diff)
net: speedup skb_splice_bits()
Commit 35f3d14db (pipe: add support for shrinking and growing pipes) added a slowdown for splice(socket -> pipe), as we might grow the spd used in skb_splice_bits() for each skb we process in the splice() syscall.

It's not needed since skb lengths are capped; the default on-stack arrays are more than enough.

Use MAX_SKB_FRAGS instead of PIPE_DEF_BUFFERS to describe the reasonable limit per skb.

Add coalescing support to help splicing of GRO skbs built from linear skbs (linked into frag_list).

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
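For readers skimming the diff below, here is a minimal user-space sketch of the coalescing rule being introduced: a new chunk is merged into the previous splice slot when it lives on the same page and starts exactly where that slot ends, so contiguous pieces (for example linear skbs linked into frag_list) no longer consume one slot each. The struct layout, the SKETCH_MAX_SLOTS constant and the function names are simplified stand-ins for illustration, not the kernel definitions.

#include <stdbool.h>

#define SKETCH_MAX_SLOTS 17 /* illustrative per-skb limit; the patch uses MAX_SKB_FRAGS */

struct sketch_partial {
	unsigned int offset; /* start of the chunk within its page */
	unsigned int len;    /* length of the chunk */
};

struct sketch_spd {
	const void *pages[SKETCH_MAX_SLOTS];
	struct sketch_partial partial[SKETCH_MAX_SLOTS];
	int nr_slots;
};

/* True when the new chunk directly continues the last slot on the same page. */
bool sketch_can_coalesce(const struct sketch_spd *spd,
			 const void *page, unsigned int offset)
{
	return spd->nr_slots &&
	       spd->pages[spd->nr_slots - 1] == page &&
	       spd->partial[spd->nr_slots - 1].offset +
	       spd->partial[spd->nr_slots - 1].len == offset;
}

/* Mirrors the shape of spd_fill_page(): returns 1 when full, 0 otherwise. */
int sketch_fill(struct sketch_spd *spd, const void *page,
		unsigned int offset, unsigned int len)
{
	if (spd->nr_slots == SKETCH_MAX_SLOTS)
		return 1;
	if (sketch_can_coalesce(spd, page, offset)) {
		spd->partial[spd->nr_slots - 1].len += len; /* extend, no new slot */
		return 0;
	}
	spd->pages[spd->nr_slots] = page;
	spd->partial[spd->nr_slots].offset = offset;
	spd->partial[spd->nr_slots].len = len;
	spd->nr_slots++;
	return 0;
}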
Diffstat (limited to 'net/core')
-rw-r--r-- net/core/skbuff.c | 30
1 file changed, 19 insertions, 11 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index bf257de95d26..dfb304066f22 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1581,11 +1581,20 @@ new_page:
memcpy(page_address(p) + off, page_address(page) + *offset, *len);
sk->sk_sndmsg_off += *len;
*offset = off;
- get_page(p);
return p;
}
+static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
+ struct page *page,
+ unsigned int offset)
+{
+ return spd->nr_pages &&
+ spd->pages[spd->nr_pages - 1] == page &&
+ (spd->partial[spd->nr_pages - 1].offset +
+ spd->partial[spd->nr_pages - 1].len == offset);
+}
+
/*
* Fill page/offset/length into spd, if it can hold more pages.
*/
@@ -1595,16 +1604,19 @@ static inline int spd_fill_page(struct splice_pipe_desc *spd,
struct sk_buff *skb, int linear,
struct sock *sk)
{
- if (unlikely(spd->nr_pages == pipe->buffers))
+ if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
return 1;
if (linear) {
page = linear_to_page(page, len, &offset, skb, sk);
if (!page)
return 1;
- } else
- get_page(page);
-
+ }
+ if (spd_can_coalesce(spd, page, offset)) {
+ spd->partial[spd->nr_pages - 1].len += *len;
+ return 0;
+ }
+ get_page(page);
spd->pages[spd->nr_pages] = page;
spd->partial[spd->nr_pages].len = *len;
spd->partial[spd->nr_pages].offset = offset;
@@ -1710,8 +1722,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
struct pipe_inode_info *pipe, unsigned int tlen,
unsigned int flags)
{
- struct partial_page partial[PIPE_DEF_BUFFERS];
- struct page *pages[PIPE_DEF_BUFFERS];
+ struct partial_page partial[MAX_SKB_FRAGS];
+ struct page *pages[MAX_SKB_FRAGS];
struct splice_pipe_desc spd = {
.pages = pages,
.partial = partial,
@@ -1723,9 +1735,6 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
struct sock *sk = skb->sk;
int ret = 0;
- if (splice_grow_spd(pipe, &spd))
- return -ENOMEM;
-
/*
* __skb_splice_bits() only fails if the output has no room left,
* so no point in going over the frag_list for the error case.
@@ -1761,7 +1770,6 @@ done:
lock_sock(sk);
}
- splice_shrink_spd(pipe, &spd);
return ret;
}