author     Arjun Roy <arjunroy@google.com>      2020-12-02 14:53:45 -0800
committer  Jakub Kicinski <kuba@kernel.org>     2020-12-04 13:40:52 -0800
commit     98917cf0d6eda01e8c3c34d35398d46b247b6fd3 (patch)
tree       2be51861e1f38a30e729c63e343e9db4b68d68af /net/ipv4/tcp.c
parent     7fba5309efe24e4f0284ef4b8663cdf401035e72 (diff)
net-zerocopy: Refactor frag-is-remappable test.

Refactor frag-is-remappable test for tcp receive zerocopy. This is
part of a patch set that introduces short-circuited hybrid copies
for small receive operations, which results in roughly 33% fewer
syscalls for small RPC scenarios.

Signed-off-by: Arjun Roy <arjunroy@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--   net/ipv4/tcp.c | 34
1 file changed, 26 insertions(+), 8 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 8372d31f4a88..aa1040b5a977 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1780,6 +1780,26 @@ static skb_frag_t *skb_advance_to_frag(struct sk_buff *skb, u32 offset_skb,
 	return frag;
 }
 
+static bool can_map_frag(const skb_frag_t *frag)
+{
+	return skb_frag_size(frag) == PAGE_SIZE && !skb_frag_off(frag);
+}
+
+static int find_next_mappable_frag(const skb_frag_t *frag,
+				   int remaining_in_skb)
+{
+	int offset = 0;
+
+	if (likely(can_map_frag(frag)))
+		return 0;
+
+	while (offset < remaining_in_skb && !can_map_frag(frag)) {
+		offset += skb_frag_size(frag);
+		++frag;
+	}
+	return offset;
+}
+
 static int tcp_copy_straggler_data(struct tcp_zerocopy_receive *zc,
 				   struct sk_buff *skb, u32 copylen,
 				   u32 *offset, u32 *seq)
@@ -1905,6 +1925,8 @@ static int tcp_zerocopy_receive(struct sock *sk,
 	ret = 0;
 	curr_addr = address;
 	while (length + PAGE_SIZE <= zc->length) {
+		int mappable_offset;
+
 		if (zc->recv_skip_hint < PAGE_SIZE) {
 			u32 offset_frag;
 
@@ -1932,15 +1954,11 @@ static int tcp_zerocopy_receive(struct sock *sk,
 			if (!frags || offset_frag)
 				break;
 		}
-		if (skb_frag_size(frags) != PAGE_SIZE || skb_frag_off(frags)) {
-			int remaining = zc->recv_skip_hint;
 
-			while (remaining && (skb_frag_size(frags) != PAGE_SIZE ||
-					     skb_frag_off(frags))) {
-				remaining -= skb_frag_size(frags);
-				frags++;
-			}
-			zc->recv_skip_hint -= remaining;
+		mappable_offset = find_next_mappable_frag(frags,
+							  zc->recv_skip_hint);
+		if (mappable_offset) {
+			zc->recv_skip_hint = mappable_offset;
 			break;
 		}
 		pages[pg_idx] = skb_frag_page(frags);
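
For readers outside the kernel tree, the refactored check can be illustrated with a small self-contained user-space sketch. The "struct frag" type, its field names, and the sample data in main() are hypothetical stand-ins for skb_frag_t and its accessors; only the control flow mirrors the helpers introduced above.

/*
 * Minimal user-space sketch of the frag-is-remappable logic above.
 * "struct frag" and its fields are hypothetical stand-ins for
 * skb_frag_t; only the control flow mirrors can_map_frag() and
 * find_next_mappable_frag() from the patch.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096

struct frag {
	unsigned int size;	/* bytes of payload held by this frag */
	unsigned int offset;	/* payload offset within its page */
};

/* A frag can be remapped only if it covers one whole, page-aligned page. */
static bool can_map_frag(const struct frag *frag)
{
	return frag->size == PAGE_SIZE && !frag->offset;
}

/*
 * Walk forward from frag and return the byte offset of the first
 * remappable frag; if none is found before remaining_in_skb bytes
 * are exhausted, the accumulated offset is returned instead.
 */
static int find_next_mappable_frag(const struct frag *frag,
				   int remaining_in_skb)
{
	int offset = 0;

	if (can_map_frag(frag))
		return 0;

	while (offset < remaining_in_skb && !can_map_frag(frag)) {
		offset += frag->size;
		++frag;
	}
	return offset;
}

int main(void)
{
	/* A 1024-byte straggler followed by a full, aligned page. */
	struct frag frags[] = {
		{ .size = 1024,      .offset = 0 },
		{ .size = PAGE_SIZE, .offset = 0 },
	};

	printf("next mappable frag at offset %d\n",
	       find_next_mappable_frag(frags, 1024 + PAGE_SIZE));
	return 0;
}

Built with any C compiler, this sketch prints an offset of 1024: the 1024-byte straggler must be copied, while the frag after it is a whole aligned page and is therefore remappable, which is the distinction the receive-zerocopy path uses to decide where mapping can resume.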