author    Al Viro <viro@zeniv.linux.org.uk>    2014-11-28 13:40:20 -0500
committer Al Viro <viro@zeniv.linux.org.uk>    2015-02-04 01:34:14 -0500
commit    57be5bdad759b9dde8b0d0cc630782a1a4ac4b9f (patch)
tree      12d1b9c40bd20aa5e5038382fd20da05f09b2881 /include/net
parent    cacdc7d2f9fa42e29b650e2879df42ea7d7833c1 (diff)
ip: convert tcp_sendmsg() to iov_iter primitives
patch is actually smaller than it seems to be - most of it is unindenting the inner loop body in tcp_sendmsg() itself...

the bit in tcp_input.c is going to get reverted very soon - that's what memcpy_from_msg() will become, but not in this commit; let's keep it reasonably contained...

There's one potentially subtle change here: in case of short copy from userland, mainline tcp_send_syn_data() discards the skb it has allocated and falls back to normal path, where we'll send as much as possible after rereading the same data again. This patch trims SYN+data skb instead - that way we don't need to copy from the same place twice.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
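To make the trimming concrete, here is a minimal sketch of the short-copy path, assuming the post-patch shape of tcp_send_syn_data() in net/ipv4/tcp_output.c (the identifiers syn_data, space, fo and the fallback label are taken on that assumption; the allocation and transmission code around this fragment is omitted):

        /* copy_from_iter() returns the number of bytes actually copied
         * and advances fo->data->msg_iter as a side effect */
        copied = copy_from_iter(skb_put(syn_data, space), space,
                                &fo->data->msg_iter);
        if (unlikely(!copied)) {
                /* nothing copied at all - give up on SYN+data */
                kfree_skb(syn_data);
                goto fallback;
        }
        if (copied != space) {
                /* short copy: keep what we got and trim the skb,
                 * instead of discarding it and rereading the data */
                skb_trim(syn_data, copied);
                space = copied;
        }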
Diffstat (limited to 'include/net')
-rw-r--r--  include/net/sock.h | 18 ++++++++----------
1 file changed, 8 insertions(+), 10 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 15341499786c..1e45e599a3ab 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1803,27 +1803,25 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
 }
 
 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
-                                           char __user *from, char *to,
+                                           struct iov_iter *from, char *to,
                                            int copy, int offset)
 {
         if (skb->ip_summed == CHECKSUM_NONE) {
-                int err = 0;
-                __wsum csum = csum_and_copy_from_user(from, to, copy, 0, &err);
-                if (err)
-                        return err;
+                __wsum csum = 0;
+                if (csum_and_copy_from_iter(to, copy, &csum, from) != copy)
+                        return -EFAULT;
                 skb->csum = csum_block_add(skb->csum, csum, offset);
         } else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
-                if (!access_ok(VERIFY_READ, from, copy) ||
-                    __copy_from_user_nocache(to, from, copy))
+                if (copy_from_iter_nocache(to, copy, from) != copy)
                         return -EFAULT;
-        } else if (copy_from_user(to, from, copy))
+        } else if (copy_from_iter(to, copy, from) != copy)
                 return -EFAULT;
         return 0;
 }
 
 static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
-                                       char __user *from, int copy)
+                                       struct iov_iter *from, int copy)
 {
         int err, offset = skb->len;
 
@@ -1835,7 +1833,7 @@ static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
         return err;
 }
 
-static inline int skb_copy_to_page_nocache(struct sock *sk, char __user *from,
+static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from,
                                            struct sk_buff *skb,
                                            struct page *page,
                                            int off, int copy)
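For reference, a hedged sketch of the caller side in tcp_sendmsg() after this conversion: the helpers now receive &msg->msg_iter directly, and since the iov_iter primitives return the number of bytes actually copied (advancing the iterator) rather than a bytes-left-uncopied count, the helpers above turn any short copy into -EFAULT and the old manual from/seglen bookkeeping in the inner loop disappears. The pfrag page-fragment names below are assumed from the contemporary tcp_sendmsg() and may differ in detail:

        /* abbreviated sketch of the tcp_sendmsg() inner loop */
        if (skb_availroom(skb) > 0) {
                /* room left in the linear skb head */
                copy = min_t(int, copy, skb_availroom(skb));
                err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy);
                if (err)
                        goto do_fault;
        } else {
                /* copy into the current page fragment */
                err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
                                               pfrag->page, pfrag->offset,
                                               copy);
                if (err)
                        goto do_error;
        }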