author     Jarek Poplawski <jarkao2@gmail.com>      2009-02-01 00:41:42 -0800
committer  David S. Miller <davem@davemloft.net>    2009-02-01 00:41:42 -0800
commit     4fb669948116d928ae44262ab7743732c574630d (patch)
tree       0be7481f6339fe4cb32faf4047f15f7cc962babc /net/core/skbuff.c
parent     31f4574774e98aa275aeeee94f41ce042285ed8e (diff)
net: Optimize memory usage when splicing from sockets.
The recent fix of data corruption when splicing from sockets uses memory very inefficiently, allocating a new page to copy each chunk of the linear part of an skb. This patch reuses the same page until it is (almost) full, by caching the page in the sk_sndmsg_page field.

With changes from David S. Miller <davem@davemloft.net>

Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
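The idea is easiest to see outside the kernel. Below is a minimal, self-contained userspace sketch of the caching scheme the patch introduces: one page-sized buffer is reused across copies, tracked by a cached pointer and a write offset, and replaced only when the space left is both tiny (under 64 bytes) and too small for the next chunk. The names cached_page, cached_off and copy_chunk are illustrative stand-ins, not kernel API; the kernel equivalents are sk->sk_sndmsg_page and sk->sk_sndmsg_off.

/*
 * Hedged userspace sketch of the page-caching idea from this patch.
 * cached_page/cached_off stand in for sk->sk_sndmsg_page/sk_sndmsg_off;
 * none of these names are kernel API.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096u

static char *cached_page;          /* stands in for sk->sk_sndmsg_page */
static unsigned int cached_off;    /* stands in for sk->sk_sndmsg_off  */

/* Copy up to *len bytes of src into the cached page; may shorten *len.
 * Returns a pointer to the copied data, or NULL on allocation failure. */
static char *copy_chunk(const char *src, unsigned int *len)
{
	if (!cached_page) {
new_page:
		cached_page = malloc(PAGE_SIZE);
		if (!cached_page)
			return NULL;
		cached_off = 0;
	} else {
		unsigned int mlen = PAGE_SIZE - cached_off;

		/* abandon a nearly-full page only if the chunk won't fit */
		if (mlen < 64 && mlen < *len) {
			free(cached_page);  /* kernel: put_page() drops a ref */
			goto new_page;
		}
		if (*len > mlen)
			*len = mlen;    /* partial copy; caller resubmits the rest */
	}

	memcpy(cached_page + cached_off, src, *len);
	cached_off += *len;
	return cached_page + cached_off - *len;
}

int main(void)
{
	const char msg[] = "hello, splice";
	unsigned int len = sizeof(msg);

	if (copy_chunk(msg, &len))
		printf("copied %u bytes, page offset now %u\n", len, cached_off);
	free(cached_page);
	return 0;
}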
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--  net/core/skbuff.c  47
1 file changed, 36 insertions(+), 11 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f20e758fe46b..e55d1ef5690d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1333,14 +1333,39 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
 	put_page(spd->pages[i]);
 }
 
-static inline struct page *linear_to_page(struct page *page, unsigned int len,
-					  unsigned int offset)
-{
-	struct page *p = alloc_pages(GFP_KERNEL, 0);
-
-	if (!p)
-		return NULL;
-	memcpy(page_address(p) + offset, page_address(page) + offset, len);
+static inline struct page *linear_to_page(struct page *page, unsigned int *len,
+					  unsigned int *offset,
+					  struct sk_buff *skb)
+{
+	struct sock *sk = skb->sk;
+	struct page *p = sk->sk_sndmsg_page;
+	unsigned int off;
+
+	if (!p) {
+new_page:
+		p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
+		if (!p)
+			return NULL;
+
+		off = sk->sk_sndmsg_off = 0;
+		/* hold one ref to this page until it's full */
+	} else {
+		unsigned int mlen;
+
+		off = sk->sk_sndmsg_off;
+		mlen = PAGE_SIZE - off;
+		if (mlen < 64 && mlen < *len) {
+			put_page(p);
+			goto new_page;
+		}
+
+		*len = min_t(unsigned int, *len, mlen);
+	}
+
+	memcpy(page_address(p) + off, page_address(page) + *offset, *len);
+	sk->sk_sndmsg_off += *len;
+	*offset = off;
+	get_page(p);
 
 	return p;
 }
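Two details of the hunk above are worth spelling out (editorial note, not part of the patch). First, the reference counting: sk_sndmsg_page keeps one long-lived reference so the socket can keep appending, while the get_page(p) before return gives the splice machinery its own reference for each segment stored in the pipe. Second, the mlen < 64 && mlen < *len test abandons a cached page only when the leftover space is both under 64 bytes and too small for the whole chunk; otherwise the copy length is clamped and the caller resubmits the remainder. A tiny standalone model of that decision, where decide_reuse() is an illustrative name only:

/* Models the reuse-or-replace decision in linear_to_page() above.
 * PAGE_SIZE and the 64-byte threshold come from the patch;
 * decide_reuse() is an illustrative name, not kernel code. */
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Returns 1 if the cached page should be reused (clamping *len to the
 * remaining space), 0 if it should be dropped for a fresh page. */
static int decide_reuse(unsigned int off, unsigned int *len)
{
	unsigned int mlen = PAGE_SIZE - off;

	if (mlen < 64 && mlen < *len)
		return 0;       /* nearly full and chunk won't fit: new page */
	if (*len > mlen)
		*len = mlen;    /* partial copy; caller retries with the rest */
	return 1;
}

int main(void)
{
	unsigned int len = 200;

	printf("off=4000 len=200 -> reuse=%d len=%u\n", decide_reuse(4000, &len), len);
	len = 200;
	printf("off=4090 len=200 -> reuse=%d len=%u\n", decide_reuse(4090, &len), len);
	return 0;
}

Run, the first call reuses the page and clamps the length to the 96 bytes left; the second drops the nearly full page (only 6 bytes left) and starts fresh.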
@@ -1349,21 +1374,21 @@ static inline struct page *linear_to_page(struct page *page, unsigned int len,
  * Fill page/offset/length into spd, if it can hold more pages.
  */
 static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
-				unsigned int len, unsigned int offset,
+				unsigned int *len, unsigned int offset,
 				struct sk_buff *skb, int linear)
 {
 	if (unlikely(spd->nr_pages == PIPE_BUFFERS))
 		return 1;
 
 	if (linear) {
-		page = linear_to_page(page, len, offset);
+		page = linear_to_page(page, len, &offset, skb);
 		if (!page)
 			return 1;
 	} else
 		get_page(page);
 
 	spd->pages[spd->nr_pages] = page;
-	spd->partial[spd->nr_pages].len = len;
+	spd->partial[spd->nr_pages].len = *len;
 	spd->partial[spd->nr_pages].offset = offset;
 	spd->nr_pages++;
 
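Why len became a pointer in spd_fill_page() (editorial note): linear_to_page() may shrink the requested length to whatever fits in the cached page, and the partial descriptor must record that possibly reduced value, hence *len rather than len. A minimal model of the slot filling, assuming illustrative names (pipe_slot, fill_slot, and MAX_SLOTS standing in for PIPE_BUFFERS):

/* Hedged model of spd_fill_page(): record {page, offset, len} triples
 * until the fixed-size table is full. Names are illustrative only. */
#include <stdio.h>

#define MAX_SLOTS 16   /* stands in for PIPE_BUFFERS */

struct pipe_slot {              /* models one spd->partial[] entry */
	void *page;
	unsigned int offset;
	unsigned int len;
};

struct slot_table {
	struct pipe_slot slots[MAX_SLOTS];
	unsigned int nr;
};

/* Returns 1 when the table is full, mirroring spd_fill_page(). */
static int fill_slot(struct slot_table *t, void *page,
		     unsigned int *len, unsigned int offset)
{
	if (t->nr == MAX_SLOTS)
		return 1;
	t->slots[t->nr].page = page;
	t->slots[t->nr].len = *len;     /* *len may have been reduced */
	t->slots[t->nr].offset = offset;
	t->nr++;
	return 0;
}

int main(void)
{
	struct slot_table t = {0};
	unsigned int len = 1200;

	while (!fill_slot(&t, NULL, &len, 0))
		;
	printf("table full after %u slots\n", t.nr);
	return 0;
}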
@@ -1405,7 +1430,7 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
 		/* the linear region may spread across several pages */
 		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
 
-		if (spd_fill_page(spd, page, flen, poff, skb, linear))
+		if (spd_fill_page(spd, page, &flen, poff, skb, linear))
 			return 1;
 
 		__segment_seek(&page, &poff, &plen, flen);
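The caller-side consequence (editorial note): since spd_fill_page() can shorten flen, __segment_seek() must advance by the number of bytes actually consumed, and the enclosing loop then resubmits the rest of the linear region. A hedged sketch of that loop shape, with consume() as an illustrative stand-in for the spd_fill_page()/__segment_seek() pair:

/* Sketch of the resubmit loop: the sink may accept fewer bytes than
 * requested, so we advance by what was actually taken. consume() and
 * its fixed 3000-byte capacity are illustrative, not kernel code. */
#include <stdio.h>

#define PAGE_SIZE 4096u

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Pretend sink that accepts at most `room` bytes per call. */
static unsigned int consume(unsigned int want, unsigned int room)
{
	return min_u(want, room);
}

int main(void)
{
	unsigned int poff = 100, plen = 10000;  /* offset/length of the region */

	while (plen) {
		/* the linear region may spread across several pages */
		unsigned int flen = min_u(plen, PAGE_SIZE - poff % PAGE_SIZE);

		flen = consume(flen, 3000);     /* sink may shorten the request */
		printf("consumed %u at offset %u\n", flen, poff);
		poff += flen;                   /* models __segment_seek() */
		plen -= flen;
	}
	return 0;
}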