author     Eric Dumazet <edumazet@google.com>       2015-03-05 13:47:48 -0800
committer  David S. Miller <davem@davemloft.net>    2015-03-06 21:50:55 -0500
commit     58025e46ea2d39f1840d5b1be5edea3297cfd23f (patch)
tree       cf0593edf6cac7b70da9213139ae481a013381c2 /net/core/skbuff.c
parent     feb27d155dfabcb8baabe10367f25fc2b1bcede1 (diff)
net: gro: remove obsolete code from skb_gro_receive()
Some drivers use copybreak to copy tiny frames into a smaller skb, and this smaller skb might not have skb->head_frag set for various reasons.

skb_gro_receive() currently does not allow the smaller skb to be aggregated into the previous GRO packet if that GRO packet already holds at least 2 MSS.

The following workload easily demonstrates the problem:

netperf -t TCP_RR -H target -- -r 3000,3000

(tcpdump shows one GRO packet with 2 MSS, plus one additional packet of 104 bytes that should have been appended.)

It turns out that we can remove code from skb_gro_receive(), because commit 8a29111c7ca6 ("net: gro: allow to build full sized skb") and its followups removed the assumption that a GRO packet with a frag_list had to have an empty head.

Removing this code allows the last (incomplete) frame to be aggregated in some RPC workloads. Note that tcp_gro_receive() already takes care of forcing a flush when necessary, including in this case.

If we want to avoid using frag_list in the first place (in forwarding workloads for example, since the outgoing NIC is generally not able to cope with skbs having a frag_list), that needs to be addressed separately.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
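For readers unfamiliar with the "copybreak" pattern the changelog refers to, here is a hedged sketch: a driver copies a tiny received frame into a freshly allocated small skb rather than handing the original receive buffer up the stack. The helper name rx_copybreak_skb() and the COPYBREAK_LEN threshold are hypothetical, not taken from any particular driver; the point is only that the resulting small linear skb is the kind of packet that, before this patch, skb_gro_receive() refused to merge once the GRO packet held 2 MSS.

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Hypothetical copybreak threshold; real drivers pick their own value. */
    #define COPYBREAK_LEN	256

    /* Copy a tiny frame into a small freshly allocated skb (copybreak).
     * As the changelog notes, such an skb may end up without
     * skb->head_frag set, which is what used to block its aggregation.
     */
    static struct sk_buff *rx_copybreak_skb(struct net_device *dev,
    					const void *data, unsigned int len)
    {
    	struct sk_buff *skb;

    	if (len > COPYBREAK_LEN)
    		return NULL;	/* caller keeps using the original buffer */

    	skb = netdev_alloc_skb_ip_align(dev, len);
    	if (unlikely(!skb))
    		return NULL;

    	skb_copy_to_linear_data(skb, data, len);
    	skb_put(skb, len);
    	return skb;
    }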
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--  net/core/skbuff.c | 45
1 file changed, 1 insertion(+), 44 deletions(-)
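The changelog leans on commit 8a29111c7ca6 letting a GRO packet with a frag_list keep a non-empty head, so a small skb can simply be chained onto the head skb's frag_list by the merge path that survives the deletion below. The following is a hedged, simplified sketch of that chaining, not the code in this file: gro_chain_to_frag_list() is a hypothetical helper and the length/truesize accounting is condensed compared to the real skb_gro_receive().

    #include <linux/netdevice.h>	/* NAPI_GRO_CB() */
    #include <linux/skbuff.h>

    /* Simplified sketch: append a small skb to the GRO head skb's
     * frag_list, using the NAPI_GRO_CB(p)->last tail pointer so the
     * chain tail is found without walking the list.
     */
    static void gro_chain_to_frag_list(struct sk_buff *p, struct sk_buff *skb)
    {
    	if (NAPI_GRO_CB(p)->last == p)	/* frag_list chain still empty */
    		skb_shinfo(p)->frag_list = skb;
    	else
    		NAPI_GRO_CB(p)->last->next = skb;
    	NAPI_GRO_CB(p)->last = skb;

    	__skb_header_release(skb);

    	p->data_len += skb->len;
    	p->truesize += skb->truesize;
    	p->len += skb->len;
    }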
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 913b94a77060..47c32413d5b9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3206,10 +3206,9 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
 	unsigned int offset = skb_gro_offset(skb);
 	unsigned int headlen = skb_headlen(skb);
-	struct sk_buff *nskb, *lp, *p = *head;
 	unsigned int len = skb_gro_len(skb);
+	struct sk_buff *lp, *p = *head;
 	unsigned int delta_truesize;
-	unsigned int headroom;
 
 	if (unlikely(p->len + len >= 65536))
 		return -E2BIG;
@@ -3276,48 +3275,6 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
 		goto done;
 	}
-	/* switch back to head shinfo */
-	pinfo = skb_shinfo(p);
-
-	if (pinfo->frag_list)
-		goto merge;
-	if (skb_gro_len(p) != pinfo->gso_size)
-		return -E2BIG;
-
-	headroom = skb_headroom(p);
-	nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
-	if (unlikely(!nskb))
-		return -ENOMEM;
-
-	__copy_skb_header(nskb, p);
-	nskb->mac_len = p->mac_len;
-
-	skb_reserve(nskb, headroom);
-	__skb_put(nskb, skb_gro_offset(p));
-
-	skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
-	skb_set_network_header(nskb, skb_network_offset(p));
-	skb_set_transport_header(nskb, skb_transport_offset(p));
-
-	__skb_pull(p, skb_gro_offset(p));
-	memcpy(skb_mac_header(nskb), skb_mac_header(p),
-	       p->data - skb_mac_header(p));
-
-	skb_shinfo(nskb)->frag_list = p;
-	skb_shinfo(nskb)->gso_size = pinfo->gso_size;
-	pinfo->gso_size = 0;
-	__skb_header_release(p);
-	NAPI_GRO_CB(nskb)->last = p;
-
-	nskb->data_len += p->len;
-	nskb->truesize += p->truesize;
-	nskb->len += p->len;
-
-	*head = nskb;
-	nskb->next = p->next;
-	p->next = NULL;
-
-	p = nskb;
 
 merge:
 	delta_truesize = skb->truesize;