Diffstat (limited to 'net/ipv4')
50 files changed, 883 insertions, 1124 deletions
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index aa2a2c79776f..d183262943d9 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -409,7 +409,7 @@ config INET_TCP_DIAG config INET_UDP_DIAG tristate "UDP: socket monitoring interface" - depends on INET_DIAG + depends on INET_DIAG && (IPV6 || IPV6=n) default n ---help--- Support for UDP socket monitoring interface used by the ss tool. diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index f7b5670744f0..fdf49fd44bb4 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -65,6 +65,8 @@ * 2 of the License, or (at your option) any later version. */ +#define pr_fmt(fmt) "IPv4: " fmt + #include <linux/err.h> #include <linux/errno.h> #include <linux/types.h> @@ -381,6 +383,7 @@ lookup_protocol: inet->mc_all = 1; inet->mc_index = 0; inet->mc_list = NULL; + inet->rcv_tos = 0; sk_refcnt_debug_inc(sk); @@ -1084,13 +1087,11 @@ out: return; out_permanent: - printk(KERN_ERR "Attempt to override permanent protocol %d.\n", - protocol); + pr_err("Attempt to override permanent protocol %d\n", protocol); goto out; out_illegal: - printk(KERN_ERR - "Ignoring attempt to register invalid socket type %d.\n", + pr_err("Ignoring attempt to register invalid socket type %d\n", p->type); goto out; } @@ -1099,8 +1100,7 @@ EXPORT_SYMBOL(inet_register_protosw); void inet_unregister_protosw(struct inet_protosw *p) { if (INET_PROTOSW_PERMANENT & p->flags) { - printk(KERN_ERR - "Attempt to unregister permanent protocol %d.\n", + pr_err("Attempt to unregister permanent protocol %d\n", p->protocol); } else { spin_lock_bh(&inetsw_lock); @@ -1149,8 +1149,8 @@ static int inet_sk_reselect_saddr(struct sock *sk) return 0; if (sysctl_ip_dynaddr > 1) { - printk(KERN_INFO "%s(): shifting inet->saddr from %pI4 to %pI4\n", - __func__, &old_saddr, &new_saddr); + pr_info("%s(): shifting inet->saddr from %pI4 to %pI4\n", + __func__, &old_saddr, &new_saddr); } inet->inet_saddr = inet->inet_rcv_saddr = new_saddr; @@ -1679,14 +1679,14 @@ static int __init inet_init(void) */ if (inet_add_protocol(&icmp_protocol, IPPROTO_ICMP) < 0) - printk(KERN_CRIT "inet_init: Cannot add ICMP protocol\n"); + pr_crit("%s: Cannot add ICMP protocol\n", __func__); if (inet_add_protocol(&udp_protocol, IPPROTO_UDP) < 0) - printk(KERN_CRIT "inet_init: Cannot add UDP protocol\n"); + pr_crit("%s: Cannot add UDP protocol\n", __func__); if (inet_add_protocol(&tcp_protocol, IPPROTO_TCP) < 0) - printk(KERN_CRIT "inet_init: Cannot add TCP protocol\n"); + pr_crit("%s: Cannot add TCP protocol\n", __func__); #ifdef CONFIG_IP_MULTICAST if (inet_add_protocol(&igmp_protocol, IPPROTO_IGMP) < 0) - printk(KERN_CRIT "inet_init: Cannot add IGMP protocol\n"); + pr_crit("%s: Cannot add IGMP protocol\n", __func__); #endif /* Register the socket-side information for inet_create. 
*/ @@ -1733,14 +1733,14 @@ static int __init inet_init(void) */ #if defined(CONFIG_IP_MROUTE) if (ip_mr_init()) - printk(KERN_CRIT "inet_init: Cannot init ipv4 mroute\n"); + pr_crit("%s: Cannot init ipv4 mroute\n", __func__); #endif /* * Initialise per-cpu ipv4 mibs */ if (init_ipv4_mibs()) - printk(KERN_CRIT "inet_init: Cannot init ipv4 mibs\n"); + pr_crit("%s: Cannot init ipv4 mibs\n", __func__); ipv4_proc_init(); diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c index 36d14406261e..fd508b526014 100644 --- a/net/ipv4/ah4.c +++ b/net/ipv4/ah4.c @@ -1,3 +1,5 @@ +#define pr_fmt(fmt) "IPsec: " fmt + #include <crypto/hash.h> #include <linux/err.h> #include <linux/module.h> @@ -445,9 +447,10 @@ static int ah_init_state(struct xfrm_state *x) if (aalg_desc->uinfo.auth.icv_fullbits/8 != crypto_ahash_digestsize(ahash)) { - printk(KERN_INFO "AH: %s digestsize %u != %hu\n", - x->aalg->alg_name, crypto_ahash_digestsize(ahash), - aalg_desc->uinfo.auth.icv_fullbits/8); + pr_info("%s: %s digestsize %u != %hu\n", + __func__, x->aalg->alg_name, + crypto_ahash_digestsize(ahash), + aalg_desc->uinfo.auth.icv_fullbits / 8); goto error; } @@ -510,11 +513,11 @@ static const struct net_protocol ah4_protocol = { static int __init ah4_init(void) { if (xfrm_register_type(&ah_type, AF_INET) < 0) { - printk(KERN_INFO "ip ah init: can't add xfrm type\n"); + pr_info("%s: can't add xfrm type\n", __func__); return -EAGAIN; } if (inet_add_protocol(&ah4_protocol, IPPROTO_AH) < 0) { - printk(KERN_INFO "ip ah init: can't add protocol\n"); + pr_info("%s: can't add protocol\n", __func__); xfrm_unregister_type(&ah_type, AF_INET); return -EAGAIN; } @@ -524,9 +527,9 @@ static int __init ah4_init(void) static void __exit ah4_fini(void) { if (inet_del_protocol(&ah4_protocol, IPPROTO_AH) < 0) - printk(KERN_INFO "ip ah close: can't remove protocol\n"); + pr_info("%s: can't remove protocol\n", __func__); if (xfrm_unregister_type(&ah_type, AF_INET) < 0) - printk(KERN_INFO "ip ah close: can't remove xfrm type\n"); + pr_info("%s: can't remove xfrm type\n", __func__); } module_init(ah4_init); diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 59402be133f0..73f46d691abc 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -863,7 +863,8 @@ static int arp_process(struct sk_buff *skb) if (addr_type == RTN_UNICAST && (arp_fwd_proxy(in_dev, dev, rt) || arp_fwd_pvlan(in_dev, dev, rt, sip, tip) || - pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) { + (rt->dst.dev != dev && + pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))) { n = neigh_event_ns(&arp_tbl, sha, &sip, dev); if (n) neigh_release(n); @@ -888,7 +889,7 @@ static int arp_process(struct sk_buff *skb) n = __neigh_lookup(&arp_tbl, &sip, dev, 0); - if (IPV4_DEVCONF_ALL(dev_net(dev), ARP_ACCEPT)) { + if (IN_DEV_ARP_ACCEPT(in_dev)) { /* Unsolicited ARP is not accepted by default. 
It is possible, that this option should be enabled for some devices (strip is candidate) diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 86f3b885b4f3..c48adc565e92 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -1857,11 +1857,6 @@ static int cipso_v4_genopt(unsigned char *buf, u32 buf_len, return CIPSO_V4_HDR_LEN + ret_val; } -static void opt_kfree_rcu(struct rcu_head *head) -{ - kfree(container_of(head, struct ip_options_rcu, rcu)); -} - /** * cipso_v4_sock_setattr - Add a CIPSO option to a socket * @sk: the socket @@ -1938,7 +1933,7 @@ int cipso_v4_sock_setattr(struct sock *sk, } rcu_assign_pointer(sk_inet->inet_opt, opt); if (old) - call_rcu(&old->rcu, opt_kfree_rcu); + kfree_rcu(old, rcu); return 0; @@ -2005,7 +2000,7 @@ int cipso_v4_req_setattr(struct request_sock *req, req_inet = inet_rsk(req); opt = xchg(&req_inet->opt, opt); if (opt) - call_rcu(&opt->rcu, opt_kfree_rcu); + kfree_rcu(opt, rcu); return 0; @@ -2075,7 +2070,7 @@ static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr) * remove the entire option struct */ *opt_ptr = NULL; hdr_delta = opt->opt.optlen; - call_rcu(&opt->rcu, opt_kfree_rcu); + kfree_rcu(opt, rcu); } return hdr_delta; diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index a5b413416da3..89a47b35905d 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -1,3 +1,5 @@ +#define pr_fmt(fmt) "IPsec: " fmt + #include <crypto/aead.h> #include <crypto/authenc.h> #include <linux/err.h> @@ -706,11 +708,11 @@ static const struct net_protocol esp4_protocol = { static int __init esp4_init(void) { if (xfrm_register_type(&esp_type, AF_INET) < 0) { - printk(KERN_INFO "ip esp init: can't add xfrm type\n"); + pr_info("%s: can't add xfrm type\n", __func__); return -EAGAIN; } if (inet_add_protocol(&esp4_protocol, IPPROTO_ESP) < 0) { - printk(KERN_INFO "ip esp init: can't add protocol\n"); + pr_info("%s: can't add protocol\n", __func__); xfrm_unregister_type(&esp_type, AF_INET); return -EAGAIN; } @@ -720,9 +722,9 @@ static int __init esp4_init(void) static void __exit esp4_fini(void) { if (inet_del_protocol(&esp4_protocol, IPPROTO_ESP) < 0) - printk(KERN_INFO "ip esp close: can't remove protocol\n"); + pr_info("%s: can't remove protocol\n", __func__); if (xfrm_unregister_type(&esp_type, AF_INET) < 0) - printk(KERN_INFO "ip esp close: can't remove xfrm type\n"); + pr_info("%s: can't remove xfrm type\n", __func__); } module_init(esp4_init); diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 92fc5f69f5da..76e72bacc217 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -695,7 +695,7 @@ void fib_add_ifaddr(struct in_ifaddr *ifa) if (ifa->ifa_flags & IFA_F_SECONDARY) { prim = inet_ifa_byprefix(in_dev, prefix, mask); if (prim == NULL) { - printk(KERN_WARNING "fib_add_ifaddr: bug: prim == NULL\n"); + pr_warn("%s: bug: prim == NULL\n", __func__); return; } } @@ -749,11 +749,11 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim) if (ifa->ifa_flags & IFA_F_SECONDARY) { prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask); if (prim == NULL) { - printk(KERN_WARNING "fib_del_ifaddr: bug: prim == NULL\n"); + pr_warn("%s: bug: prim == NULL\n", __func__); return; } if (iprim && iprim != prim) { - printk(KERN_WARNING "fib_del_ifaddr: bug: iprim != prim\n"); + pr_warn("%s: bug: iprim != prim\n", __func__); return; } } else if (!ipv4_is_zeronet(any) && diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 80106d89d548..a8c5c1d6715b 100644 --- a/net/ipv4/fib_semantics.c +++ 
b/net/ipv4/fib_semantics.c @@ -154,7 +154,7 @@ static void free_fib_info_rcu(struct rcu_head *head) void free_fib_info(struct fib_info *fi) { if (fi->fib_dead == 0) { - pr_warning("Freeing alive fib_info %p\n", fi); + pr_warn("Freeing alive fib_info %p\n", fi); return; } change_nexthops(fi) { diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 2b555a5521e0..da9b9cb2282d 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1170,9 +1170,8 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen) } if (tp && tp->pos + tp->bits > 32) - pr_warning("fib_trie" - " tp=%p pos=%d, bits=%d, key=%0x plen=%d\n", - tp, tp->pos, tp->bits, key, plen); + pr_warn("fib_trie tp=%p pos=%d, bits=%d, key=%0x plen=%d\n", + tp, tp->pos, tp->bits, key, plen); /* Rebalance the trie */ diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c index 8cb1ebb7cd74..42a491055c76 100644 --- a/net/ipv4/gre.c +++ b/net/ipv4/gre.c @@ -10,6 +10,8 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/kernel.h> #include <linux/kmod.h> @@ -118,10 +120,10 @@ static const struct net_protocol net_gre_protocol = { static int __init gre_init(void) { - pr_info("GRE over IPv4 demultiplexor driver"); + pr_info("GRE over IPv4 demultiplexor driver\n"); if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) { - pr_err("gre: can't add protocol\n"); + pr_err("can't add protocol\n"); return -EAGAIN; } diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index ab188ae12fd9..9664d353ccd8 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -62,6 +62,8 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/types.h> #include <linux/jiffies.h> @@ -670,7 +672,7 @@ static void icmp_unreach(struct sk_buff *skb) break; case ICMP_FRAG_NEEDED: if (ipv4_config.no_pmtu_disc) { - LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: fragmentation needed and DF set.\n", + LIMIT_NETDEBUG(KERN_INFO pr_fmt("%pI4: fragmentation needed and DF set\n"), &iph->daddr); } else { info = ip_rt_frag_needed(net, iph, @@ -681,7 +683,7 @@ static void icmp_unreach(struct sk_buff *skb) } break; case ICMP_SR_FAILED: - LIMIT_NETDEBUG(KERN_INFO "ICMP: %pI4: Source Route Failed.\n", + LIMIT_NETDEBUG(KERN_INFO pr_fmt("%pI4: Source Route Failed\n"), &iph->daddr); break; default: @@ -713,13 +715,10 @@ static void icmp_unreach(struct sk_buff *skb) if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses && inet_addr_type(net, iph->daddr) == RTN_BROADCAST) { if (net_ratelimit()) - printk(KERN_WARNING "%pI4 sent an invalid ICMP " - "type %u, code %u " - "error to a broadcast: %pI4 on %s\n", - &ip_hdr(skb)->saddr, - icmph->type, icmph->code, - &iph->daddr, - skb->dev->name); + pr_warn("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n", + &ip_hdr(skb)->saddr, + icmph->type, icmph->code, + &iph->daddr, skb->dev->name); goto out; } @@ -946,8 +945,8 @@ static void icmp_address_reply(struct sk_buff *skb) break; } if (!ifa && net_ratelimit()) { - printk(KERN_INFO "Wrong address mask %pI4 from %s/%pI4\n", - mp, dev->name, &ip_hdr(skb)->saddr); + pr_info("Wrong address mask %pI4 from %s/%pI4\n", + mp, dev->name, &ip_hdr(skb)->saddr); } } } diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index fcf281819cd4..8d25a1c557eb 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -960,9 +960,12 @@ static int inet_diag_rcv_msg_compat(struct sk_buff *skb, struct nlmsghdr *nlh) inet_diag_bc_audit(nla_data(attr), nla_len(attr))) return -EINVAL; } - - return 
netlink_dump_start(sock_diag_nlsk, skb, nlh, - inet_diag_dump_compat, NULL, 0); + { + struct netlink_dump_control c = { + .dump = inet_diag_dump_compat, + }; + return netlink_dump_start(sock_diag_nlsk, skb, nlh, &c); + } } return inet_diag_get_exact_compat(skb, nlh); @@ -985,9 +988,12 @@ static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h) inet_diag_bc_audit(nla_data(attr), nla_len(attr))) return -EINVAL; } - - return netlink_dump_start(sock_diag_nlsk, skb, h, - inet_diag_dump, NULL, 0); + { + struct netlink_dump_control c = { + .dump = inet_diag_dump, + }; + return netlink_dump_start(sock_diag_nlsk, skb, h, &c); + } } return inet_diag_get_exact(skb, h, (struct inet_diag_req_v2 *)NLMSG_DATA(h)); diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index bf4a9c4808e1..d4d61b694fab 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c @@ -17,6 +17,7 @@ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/net.h> +#include <linux/workqueue.h> #include <net/ip.h> #include <net/inetpeer.h> #include <net/secure_seq.h> @@ -66,6 +67,11 @@ static struct kmem_cache *peer_cachep __read_mostly; +static LIST_HEAD(gc_list); +static const int gc_delay = 60 * HZ; +static struct delayed_work gc_work; +static DEFINE_SPINLOCK(gc_lock); + #define node_height(x) x->avl_height #define peer_avl_empty ((struct inet_peer *)&peer_fake_node) @@ -102,6 +108,50 @@ int inet_peer_threshold __read_mostly = 65536 + 128; /* start to throw entries m int inet_peer_minttl __read_mostly = 120 * HZ; /* TTL under high load: 120 sec */ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min */ +static void inetpeer_gc_worker(struct work_struct *work) +{ + struct inet_peer *p, *n; + LIST_HEAD(list); + + spin_lock_bh(&gc_lock); + list_replace_init(&gc_list, &list); + spin_unlock_bh(&gc_lock); + + if (list_empty(&list)) + return; + + list_for_each_entry_safe(p, n, &list, gc_list) { + + if(need_resched()) + cond_resched(); + + if (p->avl_left != peer_avl_empty) { + list_add_tail(&p->avl_left->gc_list, &list); + p->avl_left = peer_avl_empty; + } + + if (p->avl_right != peer_avl_empty) { + list_add_tail(&p->avl_right->gc_list, &list); + p->avl_right = peer_avl_empty; + } + + n = list_entry(p->gc_list.next, struct inet_peer, gc_list); + + if (!atomic_read(&p->refcnt)) { + list_del(&p->gc_list); + kmem_cache_free(peer_cachep, p); + } + } + + if (list_empty(&list)) + return; + + spin_lock_bh(&gc_lock); + list_splice(&list, &gc_list); + spin_unlock_bh(&gc_lock); + + schedule_delayed_work(&gc_work, gc_delay); +} /* Called from ip_output.c:ip_init */ void __init inet_initpeers(void) @@ -126,6 +176,7 @@ void __init inet_initpeers(void) 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); + INIT_DELAYED_WORK_DEFERRABLE(&gc_work, inetpeer_gc_worker); } static int addr_compare(const struct inetpeer_addr *a, @@ -447,9 +498,8 @@ relookup: p->rate_last = 0; p->pmtu_expires = 0; p->pmtu_orig = 0; - p->redirect_genid = 0; memset(&p->redirect_learned, 0, sizeof(p->redirect_learned)); - + INIT_LIST_HEAD(&p->gc_list); /* Link the node. 
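Note: the inetpeer.c hunks above park whole subtrees on gc_list under gc_lock and let a deferrable delayed work item free them later. A minimal, self-contained sketch of that pattern, using hypothetical demo_* names rather than the kernel's own:

/*
 * Illustrative sketch only (hypothetical demo_* names): entries are parked
 * on a list under a spinlock and a deferrable delayed work item drains and
 * frees them, mirroring the gc_list/inetpeer_gc_worker pattern added above.
 */
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_node {
	struct list_head gc_list;
	/* ... payload ... */
};

static LIST_HEAD(demo_gc_list);
static DEFINE_SPINLOCK(demo_gc_lock);
static struct delayed_work demo_gc_work;

static void demo_gc_worker(struct work_struct *work)
{
	struct demo_node *p, *n;
	LIST_HEAD(list);

	/* Grab all pending entries in one shot, then free them unlocked. */
	spin_lock_bh(&demo_gc_lock);
	list_replace_init(&demo_gc_list, &list);
	spin_unlock_bh(&demo_gc_lock);

	list_for_each_entry_safe(p, n, &list, gc_list) {
		list_del(&p->gc_list);
		kfree(p);
	}
}

static void demo_gc_queue(struct demo_node *p)
{
	spin_lock_bh(&demo_gc_lock);
	list_add_tail(&p->gc_list, &demo_gc_list);
	spin_unlock_bh(&demo_gc_lock);
	schedule_delayed_work(&demo_gc_work, 60 * HZ);
}

static void demo_gc_init(void)
{
	/* Deferrable: the timer may be coalesced with other wakeups. */
	INIT_DELAYED_WORK_DEFERRABLE(&demo_gc_work, demo_gc_worker);
}

Deferring the kfree() to a worker keeps the invalidation path (inetpeer_invalidate_tree) to a cmpxchg of the root plus a list append under gc_lock.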
*/ link_to_pool(p, base); @@ -509,3 +559,30 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout) return rc; } EXPORT_SYMBOL(inet_peer_xrlim_allow); + +void inetpeer_invalidate_tree(int family) +{ + struct inet_peer *old, *new, *prev; + struct inet_peer_base *base = family_to_base(family); + + write_seqlock_bh(&base->lock); + + old = base->root; + if (old == peer_avl_empty_rcu) + goto out; + + new = peer_avl_empty_rcu; + + prev = cmpxchg(&base->root, old, new); + if (prev == old) { + base->total = 0; + spin_lock(&gc_lock); + list_add_tail(&prev->gc_list, &gc_list); + spin_unlock(&gc_lock); + schedule_delayed_work(&gc_work, gc_delay); + } + +out: + write_sequnlock_bh(&base->lock); +} +EXPORT_SYMBOL(inetpeer_invalidate_tree); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 1f23a57aa9e6..3727e234c884 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -20,6 +20,8 @@ * Patrick McHardy : LRU queue of frag heads for evictor. */ +#define pr_fmt(fmt) "IPv4: " fmt + #include <linux/compiler.h> #include <linux/module.h> #include <linux/types.h> @@ -299,7 +301,7 @@ static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user) return container_of(q, struct ipq, q); out_nomem: - LIMIT_NETDEBUG(KERN_ERR "ip_frag_create: no memory left !\n"); + LIMIT_NETDEBUG(KERN_ERR pr_fmt("ip_frag_create: no memory left !\n")); return NULL; } @@ -637,14 +639,13 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, return 0; out_nomem: - LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing " - "queue %p\n", qp); + LIMIT_NETDEBUG(KERN_ERR pr_fmt("queue_glue: no memory for gluing queue %p\n"), + qp); err = -ENOMEM; goto out_fail; out_oversize: if (net_ratelimit()) - printk(KERN_INFO "Oversized IP packet from %pI4.\n", - &qp->saddr); + pr_info("Oversized IP packet from %pI4\n", &qp->saddr); out_fail: IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); return err; diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 6b3ca5ba4450..b57532d4742c 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -10,6 +10,8 @@ * */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/capability.h> #include <linux/module.h> #include <linux/types.h> @@ -65,7 +67,7 @@ it is infeasible task. The most general solutions would be to keep skb->encapsulation counter (sort of local ttl), and silently drop packet when it expires. It is a good - solution, but it supposes maintaing new variable in ALL + solution, but it supposes maintaining new variable in ALL skb, even if no tunneling is used. Current solution: xmit_recursion breaks dead loops. This is a percpu @@ -91,14 +93,14 @@ One of them is to parse packet trying to detect inner encapsulation made by our node. It is difficult or even impossible, especially, - taking into account fragmentation. TO be short, tt is not solution at all. + taking into account fragmentation. TO be short, ttl is not solution at all. Current solution: The solution was UNEXPECTEDLY SIMPLE. We force DF flag on tunnels with preconfigured hop limit, that is ALL. :-) Well, it does not remove the problem completely, but exponential growth of network traffic is changed to linear (branches, that exceed pmtu are pruned) and tunnel mtu - fastly degrades to value <68, where looping stops. + rapidly degrades to value <68, where looping stops. Yes, it is not good if there exists a router in the loop, which does not force DF, even when encapsulating packets have DF set. But it is not our problem! 
Nobody could accuse us, we made @@ -457,8 +459,8 @@ static void ipgre_err(struct sk_buff *skb, u32 info) GRE tunnels with enabled checksum. Tell them "thank you". Well, I wonder, rfc1812 was written by Cisco employee, - what the hell these idiots break standrads established - by themself??? + what the hell these idiots break standards established + by themselves??? */ const struct iphdr *iph = (const struct iphdr *)skb->data; @@ -730,15 +732,16 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev if (skb->protocol == htons(ETH_P_IP)) { rt = skb_rtable(skb); - if ((dst = rt->rt_gateway) == 0) - goto tx_error_icmp; + dst = rt->rt_gateway; } #if IS_ENABLED(CONFIG_IPV6) else if (skb->protocol == htons(ETH_P_IPV6)) { - struct neighbour *neigh = dst_get_neighbour_noref(skb_dst(skb)); const struct in6_addr *addr6; + struct neighbour *neigh; + bool do_tx_error_icmp; int addr_type; + neigh = dst_neigh_lookup(skb_dst(skb), &ipv6_hdr(skb)->daddr); if (neigh == NULL) goto tx_error; @@ -751,9 +754,14 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev } if ((addr_type & IPV6_ADDR_COMPATv4) == 0) + do_tx_error_icmp = true; + else { + do_tx_error_icmp = false; + dst = addr6->s6_addr32[3]; + } + neigh_release(neigh); + if (do_tx_error_icmp) goto tx_error_icmp; - - dst = addr6->s6_addr32[3]; } #endif else @@ -914,9 +922,10 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev __IPTUNNEL_XMIT(tstats, &dev->stats); return NETDEV_TX_OK; +#if IS_ENABLED(CONFIG_IPV6) tx_error_icmp: dst_link_failure(skb); - +#endif tx_error: dev->stats.tx_errors++; dev_kfree_skb(skb); @@ -1529,7 +1538,7 @@ static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nla return -EEXIST; if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS]) - random_ether_addr(dev->dev_addr); + eth_hw_addr_random(dev); mtu = ipgre_tunnel_bind_dev(dev); if (!tb[IFLA_MTU]) @@ -1709,7 +1718,7 @@ static int __init ipgre_init(void) { int err; - printk(KERN_INFO "GRE over IPv4 tunneling driver\n"); + pr_info("GRE over IPv4 tunneling driver\n"); err = register_pernet_device(&ipgre_net_ops); if (err < 0) @@ -1717,7 +1726,7 @@ static int __init ipgre_init(void) err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO); if (err < 0) { - printk(KERN_INFO "ipgre init: can't add protocol\n"); + pr_info("%s: can't add protocol\n", __func__); goto add_proto_failed; } @@ -1746,7 +1755,7 @@ static void __exit ipgre_fini(void) rtnl_link_unregister(&ipgre_tap_ops); rtnl_link_unregister(&ipgre_link_ops); if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0) - printk(KERN_INFO "ipgre close: can't remove protocol\n"); + pr_info("%s: can't remove protocol\n", __func__); unregister_pernet_device(&ipgre_net_ops); } diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 073a9b01c40c..f3f1108940f5 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -113,6 +113,8 @@ * 2 of the License, or (at your option) any later version. 
*/ +#define pr_fmt(fmt) "IPv4: " fmt + #include <asm/system.h> #include <linux/module.h> #include <linux/types.h> @@ -148,7 +150,7 @@ /* * Process Router Attention IP option (RFC 2113) */ -int ip_call_ra_chain(struct sk_buff *skb) +bool ip_call_ra_chain(struct sk_buff *skb) { struct ip_ra_chain *ra; u8 protocol = ip_hdr(skb)->protocol; @@ -167,7 +169,7 @@ int ip_call_ra_chain(struct sk_buff *skb) net_eq(sock_net(sk), dev_net(dev))) { if (ip_is_fragment(ip_hdr(skb))) { if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN)) - return 1; + return true; } if (last) { struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); @@ -180,9 +182,9 @@ int ip_call_ra_chain(struct sk_buff *skb) if (last) { raw_rcv(last, skb); - return 1; + return true; } - return 0; + return false; } static int ip_local_deliver_finish(struct sk_buff *skb) @@ -265,7 +267,7 @@ int ip_local_deliver(struct sk_buff *skb) ip_local_deliver_finish); } -static inline int ip_rcv_options(struct sk_buff *skb) +static inline bool ip_rcv_options(struct sk_buff *skb) { struct ip_options *opt; const struct iphdr *iph; @@ -299,8 +301,8 @@ static inline int ip_rcv_options(struct sk_buff *skb) if (!IN_DEV_SOURCE_ROUTE(in_dev)) { if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) - printk(KERN_INFO "source route option %pI4 -> %pI4\n", - &iph->saddr, &iph->daddr); + pr_info("source route option %pI4 -> %pI4\n", + &iph->saddr, &iph->daddr); goto drop; } } @@ -309,9 +311,9 @@ static inline int ip_rcv_options(struct sk_buff *skb) goto drop; } - return 0; + return false; drop: - return -1; + return true; } static int ip_rcv_finish(struct sk_buff *skb) diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index 1e60f7679075..a0d0d9d9b870 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -9,6 +9,8 @@ * */ +#define pr_fmt(fmt) "IPv4: " fmt + #include <linux/capability.h> #include <linux/module.h> #include <linux/slab.h> @@ -573,11 +575,11 @@ void ip_forward_options(struct sk_buff *skb) } if (srrptr + 3 <= srrspace) { opt->is_changed = 1; - ip_rt_get_source(&optptr[srrptr-1], skb, rt); ip_hdr(skb)->daddr = opt->nexthop; + ip_rt_get_source(&optptr[srrptr-1], skb, rt); optptr[2] = srrptr+4; } else if (net_ratelimit()) - printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n"); + pr_crit("%s(): Argh! Destination lost!\n", __func__); if (opt->ts_needaddr) { optptr = raw + opt->ts; ip_rt_get_source(&optptr[optptr[2]-9], skb, rt); diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 8aa87c19fa00..2fd0fba77124 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -445,11 +445,6 @@ out: } -static void opt_kfree_rcu(struct rcu_head *head) -{ - kfree(container_of(head, struct ip_options_rcu, rcu)); -} - /* * Socket option code for IP. This is the end of the line after any * TCP,UDP etc options on an IP socket. 
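Note: cipso_ipv4.c and ip_sockglue.c both drop their opt_kfree_rcu() callbacks in favour of kfree_rcu(), which only needs the pointer and the name of the embedded rcu_head field. A minimal before/after sketch with a hypothetical demo_opt type:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_opt {
	struct rcu_head rcu;
	/* ... option payload ... */
};

/* Old pattern: an explicit callback recovers the object and frees it. */
static void demo_opt_kfree_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct demo_opt, rcu));
}

static void demo_opt_release_old(struct demo_opt *old)
{
	call_rcu(&old->rcu, demo_opt_kfree_rcu);
}

/* New pattern: kfree_rcu() takes the pointer and the rcu_head field name,
 * so the boilerplate callback disappears. */
static void demo_opt_release_new(struct demo_opt *old)
{
	kfree_rcu(old, rcu);
}

Either way the memory is only reclaimed after a grace period; the conversion just removes the hand-written callback.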
@@ -469,6 +464,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) | (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) | (1<<IP_MINTTL) | (1<<IP_NODEFRAG))) || + optname == IP_UNICAST_IF || optname == IP_MULTICAST_TTL || optname == IP_MULTICAST_ALL || optname == IP_MULTICAST_LOOP || @@ -525,7 +521,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, } rcu_assign_pointer(inet->inet_opt, opt); if (old) - call_rcu(&old->rcu, opt_kfree_rcu); + kfree_rcu(old, rcu); break; } case IP_PKTINFO: @@ -628,6 +624,35 @@ static int do_ip_setsockopt(struct sock *sk, int level, goto e_inval; inet->mc_loop = !!val; break; + case IP_UNICAST_IF: + { + struct net_device *dev = NULL; + int ifindex; + + if (optlen != sizeof(int)) + goto e_inval; + + ifindex = (__force int)ntohl((__force __be32)val); + if (ifindex == 0) { + inet->uc_index = 0; + err = 0; + break; + } + + dev = dev_get_by_index(sock_net(sk), ifindex); + err = -EADDRNOTAVAIL; + if (!dev) + break; + dev_put(dev); + + err = -EINVAL; + if (sk->sk_bound_dev_if) + break; + + inet->uc_index = ifindex; + err = 0; + break; + } case IP_MULTICAST_IF: { struct ip_mreqn mreq; @@ -1178,6 +1203,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, case IP_MULTICAST_LOOP: val = inet->mc_loop; break; + case IP_UNICAST_IF: + val = (__force int)htonl((__u32) inet->uc_index); + break; case IP_MULTICAST_IF: { struct in_addr addr; @@ -1256,6 +1284,10 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, int hlim = inet->mc_ttl; put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim); } + if (inet->cmsg_flags & IP_CMSG_TOS) { + int tos = inet->rcv_tos; + put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos); + } len -= msg.msg_controllen; return put_user(len, optlen); } diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c index c857f6f49b03..63b64c45a826 100644 --- a/net/ipv4/ipcomp.c +++ b/net/ipv4/ipcomp.c @@ -156,11 +156,11 @@ static const struct net_protocol ipcomp4_protocol = { static int __init ipcomp4_init(void) { if (xfrm_register_type(&ipcomp_type, AF_INET) < 0) { - printk(KERN_INFO "ipcomp init: can't add xfrm type\n"); + pr_info("%s: can't add xfrm type\n", __func__); return -EAGAIN; } if (inet_add_protocol(&ipcomp4_protocol, IPPROTO_COMP) < 0) { - printk(KERN_INFO "ipcomp init: can't add protocol\n"); + pr_info("%s: can't add protocol\n", __func__); xfrm_unregister_type(&ipcomp_type, AF_INET); return -EAGAIN; } @@ -170,9 +170,9 @@ static int __init ipcomp4_init(void) static void __exit ipcomp4_fini(void) { if (inet_del_protocol(&ipcomp4_protocol, IPPROTO_COMP) < 0) - printk(KERN_INFO "ip ipcomp close: can't remove protocol\n"); + pr_info("%s: can't remove protocol\n", __func__); if (xfrm_unregister_type(&ipcomp_type, AF_INET) < 0) - printk(KERN_INFO "ip ipcomp close: can't remove xfrm type\n"); + pr_info("%s: can't remove xfrm type\n", __func__); } module_init(ipcomp4_init); diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 6e412a60a91f..92ac7e7363a0 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -214,7 +214,7 @@ static int __init ic_open_devs(void) if (!(dev->flags & IFF_LOOPBACK)) continue; if (dev_change_flags(dev, dev->flags | IFF_UP) < 0) - printk(KERN_ERR "IP-Config: Failed to open %s\n", dev->name); + pr_err("IP-Config: Failed to open %s\n", dev->name); } for_each_netdev(&init_net, dev) { @@ -223,7 +223,8 @@ static int __init ic_open_devs(void) if (dev->mtu >= 364) able |= IC_BOOTP; else - printk(KERN_WARNING "DHCP/BOOTP: Ignoring device %s, MTU %d too 
small", dev->name, dev->mtu); + pr_warn("DHCP/BOOTP: Ignoring device %s, MTU %d too small", + dev->name, dev->mtu); if (!(dev->flags & IFF_NOARP)) able |= IC_RARP; able &= ic_proto_enabled; @@ -231,7 +232,8 @@ static int __init ic_open_devs(void) continue; oflags = dev->flags; if (dev_change_flags(dev, oflags | IFF_UP) < 0) { - printk(KERN_ERR "IP-Config: Failed to open %s\n", dev->name); + pr_err("IP-Config: Failed to open %s\n", + dev->name); continue; } if (!(d = kmalloc(sizeof(struct ic_device), GFP_KERNEL))) { @@ -273,9 +275,10 @@ have_carrier: if (!ic_first_dev) { if (user_dev_name[0]) - printk(KERN_ERR "IP-Config: Device `%s' not found.\n", user_dev_name); + pr_err("IP-Config: Device `%s' not found\n", + user_dev_name); else - printk(KERN_ERR "IP-Config: No network devices available.\n"); + pr_err("IP-Config: No network devices available\n"); return -ENODEV; } return 0; @@ -359,17 +362,20 @@ static int __init ic_setup_if(void) strcpy(ir.ifr_ifrn.ifrn_name, ic_dev->name); set_sockaddr(sin, ic_myaddr, 0); if ((err = ic_devinet_ioctl(SIOCSIFADDR, &ir)) < 0) { - printk(KERN_ERR "IP-Config: Unable to set interface address (%d).\n", err); + pr_err("IP-Config: Unable to set interface address (%d)\n", + err); return -1; } set_sockaddr(sin, ic_netmask, 0); if ((err = ic_devinet_ioctl(SIOCSIFNETMASK, &ir)) < 0) { - printk(KERN_ERR "IP-Config: Unable to set interface netmask (%d).\n", err); + pr_err("IP-Config: Unable to set interface netmask (%d)\n", + err); return -1; } set_sockaddr(sin, ic_myaddr | ~ic_netmask, 0); if ((err = ic_devinet_ioctl(SIOCSIFBRDADDR, &ir)) < 0) { - printk(KERN_ERR "IP-Config: Unable to set interface broadcast address (%d).\n", err); + pr_err("IP-Config: Unable to set interface broadcast address (%d)\n", + err); return -1; } /* Handle the case where we need non-standard MTU on the boot link (a network @@ -380,8 +386,8 @@ static int __init ic_setup_if(void) strcpy(ir.ifr_name, ic_dev->name); ir.ifr_mtu = ic_dev_mtu; if ((err = ic_dev_ioctl(SIOCSIFMTU, &ir)) < 0) - printk(KERN_ERR "IP-Config: Unable to set interface mtu to %d (%d).\n", - ic_dev_mtu, err); + pr_err("IP-Config: Unable to set interface mtu to %d (%d)\n", + ic_dev_mtu, err); } return 0; } @@ -396,7 +402,7 @@ static int __init ic_setup_routes(void) memset(&rm, 0, sizeof(rm)); if ((ic_gateway ^ ic_myaddr) & ic_netmask) { - printk(KERN_ERR "IP-Config: Gateway not on directly connected network.\n"); + pr_err("IP-Config: Gateway not on directly connected network\n"); return -1; } set_sockaddr((struct sockaddr_in *) &rm.rt_dst, 0, 0); @@ -404,7 +410,8 @@ static int __init ic_setup_routes(void) set_sockaddr((struct sockaddr_in *) &rm.rt_gateway, ic_gateway, 0); rm.rt_flags = RTF_UP | RTF_GATEWAY; if ((err = ic_route_ioctl(SIOCADDRT, &rm)) < 0) { - printk(KERN_ERR "IP-Config: Cannot add default route (%d).\n", err); + pr_err("IP-Config: Cannot add default route (%d)\n", + err); return -1; } } @@ -437,8 +444,8 @@ static int __init ic_defaults(void) else if (IN_CLASSC(ntohl(ic_myaddr))) ic_netmask = htonl(IN_CLASSC_NET); else { - printk(KERN_ERR "IP-Config: Unable to guess netmask for address %pI4\n", - &ic_myaddr); + pr_err("IP-Config: Unable to guess netmask for address %pI4\n", + &ic_myaddr); return -1; } printk("IP-Config: Guessing netmask %pI4\n", &ic_netmask); @@ -688,8 +695,8 @@ ic_dhcp_init_options(u8 *options) e += len; } if (*vendor_class_identifier) { - printk(KERN_INFO "DHCP: sending class identifier \"%s\"\n", - vendor_class_identifier); + pr_info("DHCP: sending class identifier \"%s\"\n", + 
vendor_class_identifier); *e++ = 60; /* Class-identifier */ len = strlen(vendor_class_identifier); *e++ = len; @@ -949,8 +956,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str /* Fragments are not supported */ if (ip_is_fragment(h)) { if (net_ratelimit()) - printk(KERN_ERR "DHCP/BOOTP: Ignoring fragmented " - "reply.\n"); + pr_err("DHCP/BOOTP: Ignoring fragmented reply\n"); goto drop; } @@ -999,8 +1005,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str if (b->op != BOOTP_REPLY || b->xid != d->xid) { if (net_ratelimit()) - printk(KERN_ERR "DHCP/BOOTP: Reply not for us, " - "op[%x] xid[%x]\n", + pr_err("DHCP/BOOTP: Reply not for us, op[%x] xid[%x]\n", b->op, b->xid); goto drop_unlock; } @@ -1008,7 +1013,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str /* Is it a reply for the device we are configuring? */ if (b->xid != ic_dev_xid) { if (net_ratelimit()) - printk(KERN_ERR "DHCP/BOOTP: Ignoring delayed packet\n"); + pr_err("DHCP/BOOTP: Ignoring delayed packet\n"); goto drop_unlock; } @@ -1146,17 +1151,17 @@ static int __init ic_dynamic(void) * are missing, and without DHCP/BOOTP/RARP we are unable to get it. */ if (!ic_proto_enabled) { - printk(KERN_ERR "IP-Config: Incomplete network configuration information.\n"); + pr_err("IP-Config: Incomplete network configuration information\n"); return -1; } #ifdef IPCONFIG_BOOTP if ((ic_proto_enabled ^ ic_proto_have_if) & IC_BOOTP) - printk(KERN_ERR "DHCP/BOOTP: No suitable device found.\n"); + pr_err("DHCP/BOOTP: No suitable device found\n"); #endif #ifdef IPCONFIG_RARP if ((ic_proto_enabled ^ ic_proto_have_if) & IC_RARP) - printk(KERN_ERR "RARP: No suitable device found.\n"); + pr_err("RARP: No suitable device found\n"); #endif if (!ic_proto_have_if) @@ -1183,11 +1188,11 @@ static int __init ic_dynamic(void) * [Actually we could now, but the nothing else running note still * applies.. - AC] */ - printk(KERN_NOTICE "Sending %s%s%s requests .", - do_bootp - ? ((ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP") : "", - (do_bootp && do_rarp) ? " and " : "", - do_rarp ? "RARP" : ""); + pr_notice("Sending %s%s%s requests .", + do_bootp + ? ((ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP") : "", + (do_bootp && do_rarp) ? " and " : "", + do_rarp ? "RARP" : ""); start_jiffies = jiffies; d = ic_first_dev; @@ -1216,13 +1221,13 @@ static int __init ic_dynamic(void) (ic_proto_enabled & IC_USE_DHCP) && ic_dhcp_msgtype != DHCPACK) { ic_got_reply = 0; - printk(KERN_CONT ","); + pr_cont(","); continue; } #endif /* IPCONFIG_DHCP */ if (ic_got_reply) { - printk(KERN_CONT " OK\n"); + pr_cont(" OK\n"); break; } @@ -1230,7 +1235,7 @@ static int __init ic_dynamic(void) continue; if (! --retries) { - printk(KERN_CONT " timed out!\n"); + pr_cont(" timed out!\n"); break; } @@ -1240,7 +1245,7 @@ static int __init ic_dynamic(void) if (timeout > CONF_TIMEOUT_MAX) timeout = CONF_TIMEOUT_MAX; - printk(KERN_CONT "."); + pr_cont("."); } #ifdef IPCONFIG_BOOTP @@ -1260,8 +1265,8 @@ static int __init ic_dynamic(void) printk("IP-Config: Got %s answer from %pI4, ", ((ic_got_reply & IC_RARP) ? "RARP" : (ic_proto_enabled & IC_USE_DHCP) ? 
"DHCP" : "BOOTP"), - &ic_servaddr); - printk(KERN_CONT "my address is %pI4\n", &ic_myaddr); + &ic_servaddr); + pr_cont("my address is %pI4\n", &ic_myaddr); return 0; } @@ -1437,24 +1442,22 @@ static int __init ip_auto_config(void) */ #ifdef CONFIG_ROOT_NFS if (ROOT_DEV == Root_NFS) { - printk(KERN_ERR - "IP-Config: Retrying forever (NFS root)...\n"); + pr_err("IP-Config: Retrying forever (NFS root)...\n"); goto try_try_again; } #endif if (--retries) { - printk(KERN_ERR - "IP-Config: Reopening network devices...\n"); + pr_err("IP-Config: Reopening network devices...\n"); goto try_try_again; } /* Oh, well. At least we tried. */ - printk(KERN_ERR "IP-Config: Auto-configuration of network failed.\n"); + pr_err("IP-Config: Auto-configuration of network failed\n"); return -1; } #else /* !DYNAMIC */ - printk(KERN_ERR "IP-Config: Incomplete network configuration information.\n"); + pr_err("IP-Config: Incomplete network configuration information\n"); ic_close_devs(); return -1; #endif /* IPCONFIG_DYNAMIC */ @@ -1492,19 +1495,16 @@ static int __init ip_auto_config(void) /* * Clue in the operator. */ - printk("IP-Config: Complete:\n"); - printk(" device=%s", ic_dev->name); - printk(KERN_CONT ", addr=%pI4", &ic_myaddr); - printk(KERN_CONT ", mask=%pI4", &ic_netmask); - printk(KERN_CONT ", gw=%pI4", &ic_gateway); - printk(KERN_CONT ",\n host=%s, domain=%s, nis-domain=%s", - utsname()->nodename, ic_domain, utsname()->domainname); - printk(KERN_CONT ",\n bootserver=%pI4", &ic_servaddr); - printk(KERN_CONT ", rootserver=%pI4", &root_server_addr); - printk(KERN_CONT ", rootpath=%s", root_server_path); + pr_info("IP-Config: Complete:\n"); + pr_info(" device=%s, addr=%pI4, mask=%pI4, gw=%pI4\n", + ic_dev->name, &ic_myaddr, &ic_netmask, &ic_gateway); + pr_info(" host=%s, domain=%s, nis-domain=%s\n", + utsname()->nodename, ic_domain, utsname()->domainname); + pr_info(" bootserver=%pI4, rootserver=%pI4, rootpath=%s", + &ic_servaddr, &root_server_addr, root_server_path); if (ic_dev_mtu) - printk(KERN_CONT ", mtu=%d", ic_dev_mtu); - printk(KERN_CONT "\n"); + pr_cont(", mtu=%d", ic_dev_mtu); + pr_cont("\n"); #endif /* !SILENT */ return 0; @@ -1637,8 +1637,8 @@ static int __init vendor_class_identifier_setup(char *addrs) if (strlcpy(vendor_class_identifier, addrs, sizeof(vendor_class_identifier)) >= sizeof(vendor_class_identifier)) - printk(KERN_WARNING "DHCP: vendorclass too long, truncated to \"%s\"", - vendor_class_identifier); + pr_warn("DHCP: vendorclass too long, truncated to \"%s\"", + vendor_class_identifier); return 1; } diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 22a199315309..ae1413e3f2f8 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -454,8 +454,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) dev->stats.tx_fifo_errors++; goto tx_error; } - if ((dst = rt->rt_gateway) == 0) - goto tx_error_icmp; + dst = rt->rt_gateway; } rt = ip_route_output_ports(dev_net(dev), &fl4, NULL, @@ -893,7 +892,7 @@ static int __init ipip_init(void) err = xfrm4_tunnel_register(&ipip_handler, AF_INET); if (err < 0) { unregister_pernet_device(&ipip_net_ops); - printk(KERN_INFO "ipip init: can't register tunnel\n"); + pr_info("%s: can't register tunnel\n", __func__); } return err; } @@ -901,7 +900,7 @@ static int __init ipip_init(void) static void __exit ipip_fini(void) { if (xfrm4_tunnel_deregister(&ipip_handler, AF_INET)) - printk(KERN_INFO "ipip close: can't deregister tunnel\n"); + pr_info("%s: can't deregister tunnel\n", __func__); unregister_pernet_device(&ipip_net_ops); } 
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 7bc2db6db8d4..0518a4fb177b 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -951,7 +951,7 @@ static int ipmr_cache_report(struct mr_table *mrt, rcu_read_unlock(); if (ret < 0) { if (net_ratelimit()) - printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n"); + pr_warn("mroute: pending queue full, dropping entries\n"); kfree_skb(skb); } @@ -2538,7 +2538,7 @@ int __init ip_mr_init(void) goto reg_notif_fail; #ifdef CONFIG_IP_PIMSM_V2 if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) { - printk(KERN_ERR "ip_mr_init: can't add PIM protocol\n"); + pr_err("%s: can't add PIM protocol\n", __func__); err = -EAGAIN; goto add_proto_fail; } diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index 74dfc9e5211f..fcc543cd987a 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig @@ -123,15 +123,6 @@ config IP_NF_TARGET_REJECT To compile it as a module, choose M here. If unsure, say N. -config IP_NF_TARGET_LOG - tristate "LOG target support" - default m if NETFILTER_ADVANCED=n - help - This option adds a `LOG' target, which allows you to create rules in - any iptables table which records the packet header to the syslog. - - To compile it as a module, choose M here. If unsure, say N. - config IP_NF_TARGET_ULOG tristate "ULOG target support" default m if NETFILTER_ADVANCED=n diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile index 213a462b739b..240b68469a7a 100644 --- a/net/ipv4/netfilter/Makefile +++ b/net/ipv4/netfilter/Makefile @@ -54,7 +54,6 @@ obj-$(CONFIG_IP_NF_MATCH_RPFILTER) += ipt_rpfilter.o # targets obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o -obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c deleted file mode 100644 index d76d6c9ed946..000000000000 --- a/net/ipv4/netfilter/ipt_LOG.c +++ /dev/null @@ -1,516 +0,0 @@ -/* - * This is a module which is used for logging packets. - */ - -/* (C) 1999-2001 Paul `Rusty' Russell - * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/module.h> -#include <linux/spinlock.h> -#include <linux/skbuff.h> -#include <linux/if_arp.h> -#include <linux/ip.h> -#include <net/icmp.h> -#include <net/udp.h> -#include <net/tcp.h> -#include <net/route.h> - -#include <linux/netfilter.h> -#include <linux/netfilter/x_tables.h> -#include <linux/netfilter_ipv4/ipt_LOG.h> -#include <net/netfilter/nf_log.h> -#include <net/netfilter/xt_log.h> - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>"); -MODULE_DESCRIPTION("Xtables: IPv4 packet logging to syslog"); - -/* One level of recursion won't kill us */ -static void dump_packet(struct sbuff *m, - const struct nf_loginfo *info, - const struct sk_buff *skb, - unsigned int iphoff) -{ - struct iphdr _iph; - const struct iphdr *ih; - unsigned int logflags; - - if (info->type == NF_LOG_TYPE_LOG) - logflags = info->u.log.logflags; - else - logflags = NF_LOG_MASK; - - ih = skb_header_pointer(skb, iphoff, sizeof(_iph), &_iph); - if (ih == NULL) { - sb_add(m, "TRUNCATED"); - return; - } - - /* Important fields: - * TOS, len, DF/MF, fragment offset, TTL, src, dst, options. */ - /* Max length: 40 "SRC=255.255.255.255 DST=255.255.255.255 " */ - sb_add(m, "SRC=%pI4 DST=%pI4 ", - &ih->saddr, &ih->daddr); - - /* Max length: 46 "LEN=65535 TOS=0xFF PREC=0xFF TTL=255 ID=65535 " */ - sb_add(m, "LEN=%u TOS=0x%02X PREC=0x%02X TTL=%u ID=%u ", - ntohs(ih->tot_len), ih->tos & IPTOS_TOS_MASK, - ih->tos & IPTOS_PREC_MASK, ih->ttl, ntohs(ih->id)); - - /* Max length: 6 "CE DF MF " */ - if (ntohs(ih->frag_off) & IP_CE) - sb_add(m, "CE "); - if (ntohs(ih->frag_off) & IP_DF) - sb_add(m, "DF "); - if (ntohs(ih->frag_off) & IP_MF) - sb_add(m, "MF "); - - /* Max length: 11 "FRAG:65535 " */ - if (ntohs(ih->frag_off) & IP_OFFSET) - sb_add(m, "FRAG:%u ", ntohs(ih->frag_off) & IP_OFFSET); - - if ((logflags & IPT_LOG_IPOPT) && - ih->ihl * 4 > sizeof(struct iphdr)) { - const unsigned char *op; - unsigned char _opt[4 * 15 - sizeof(struct iphdr)]; - unsigned int i, optsize; - - optsize = ih->ihl * 4 - sizeof(struct iphdr); - op = skb_header_pointer(skb, iphoff+sizeof(_iph), - optsize, _opt); - if (op == NULL) { - sb_add(m, "TRUNCATED"); - return; - } - - /* Max length: 127 "OPT (" 15*4*2chars ") " */ - sb_add(m, "OPT ("); - for (i = 0; i < optsize; i++) - sb_add(m, "%02X", op[i]); - sb_add(m, ") "); - } - - switch (ih->protocol) { - case IPPROTO_TCP: { - struct tcphdr _tcph; - const struct tcphdr *th; - - /* Max length: 10 "PROTO=TCP " */ - sb_add(m, "PROTO=TCP "); - - if (ntohs(ih->frag_off) & IP_OFFSET) - break; - - /* Max length: 25 "INCOMPLETE [65535 bytes] " */ - th = skb_header_pointer(skb, iphoff + ih->ihl * 4, - sizeof(_tcph), &_tcph); - if (th == NULL) { - sb_add(m, "INCOMPLETE [%u bytes] ", - skb->len - iphoff - ih->ihl*4); - break; - } - - /* Max length: 20 "SPT=65535 DPT=65535 " */ - sb_add(m, "SPT=%u DPT=%u ", - ntohs(th->source), ntohs(th->dest)); - /* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */ - if (logflags & IPT_LOG_TCPSEQ) - sb_add(m, "SEQ=%u ACK=%u ", - ntohl(th->seq), ntohl(th->ack_seq)); - /* Max length: 13 "WINDOW=65535 " */ - sb_add(m, "WINDOW=%u ", ntohs(th->window)); - /* Max length: 9 "RES=0x3F " */ - sb_add(m, "RES=0x%02x ", (u8)(ntohl(tcp_flag_word(th) & TCP_RESERVED_BITS) >> 22)); - /* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */ - if (th->cwr) - sb_add(m, "CWR "); - if (th->ece) - sb_add(m, "ECE "); - if (th->urg) - sb_add(m, "URG "); - if (th->ack) - sb_add(m, "ACK "); - if (th->psh) - 
sb_add(m, "PSH "); - if (th->rst) - sb_add(m, "RST "); - if (th->syn) - sb_add(m, "SYN "); - if (th->fin) - sb_add(m, "FIN "); - /* Max length: 11 "URGP=65535 " */ - sb_add(m, "URGP=%u ", ntohs(th->urg_ptr)); - - if ((logflags & IPT_LOG_TCPOPT) && - th->doff * 4 > sizeof(struct tcphdr)) { - unsigned char _opt[4 * 15 - sizeof(struct tcphdr)]; - const unsigned char *op; - unsigned int i, optsize; - - optsize = th->doff * 4 - sizeof(struct tcphdr); - op = skb_header_pointer(skb, - iphoff+ih->ihl*4+sizeof(_tcph), - optsize, _opt); - if (op == NULL) { - sb_add(m, "TRUNCATED"); - return; - } - - /* Max length: 127 "OPT (" 15*4*2chars ") " */ - sb_add(m, "OPT ("); - for (i = 0; i < optsize; i++) - sb_add(m, "%02X", op[i]); - sb_add(m, ") "); - } - break; - } - case IPPROTO_UDP: - case IPPROTO_UDPLITE: { - struct udphdr _udph; - const struct udphdr *uh; - - if (ih->protocol == IPPROTO_UDP) - /* Max length: 10 "PROTO=UDP " */ - sb_add(m, "PROTO=UDP " ); - else /* Max length: 14 "PROTO=UDPLITE " */ - sb_add(m, "PROTO=UDPLITE "); - - if (ntohs(ih->frag_off) & IP_OFFSET) - break; - - /* Max length: 25 "INCOMPLETE [65535 bytes] " */ - uh = skb_header_pointer(skb, iphoff+ih->ihl*4, - sizeof(_udph), &_udph); - if (uh == NULL) { - sb_add(m, "INCOMPLETE [%u bytes] ", - skb->len - iphoff - ih->ihl*4); - break; - } - - /* Max length: 20 "SPT=65535 DPT=65535 " */ - sb_add(m, "SPT=%u DPT=%u LEN=%u ", - ntohs(uh->source), ntohs(uh->dest), - ntohs(uh->len)); - break; - } - case IPPROTO_ICMP: { - struct icmphdr _icmph; - const struct icmphdr *ich; - static const size_t required_len[NR_ICMP_TYPES+1] - = { [ICMP_ECHOREPLY] = 4, - [ICMP_DEST_UNREACH] - = 8 + sizeof(struct iphdr), - [ICMP_SOURCE_QUENCH] - = 8 + sizeof(struct iphdr), - [ICMP_REDIRECT] - = 8 + sizeof(struct iphdr), - [ICMP_ECHO] = 4, - [ICMP_TIME_EXCEEDED] - = 8 + sizeof(struct iphdr), - [ICMP_PARAMETERPROB] - = 8 + sizeof(struct iphdr), - [ICMP_TIMESTAMP] = 20, - [ICMP_TIMESTAMPREPLY] = 20, - [ICMP_ADDRESS] = 12, - [ICMP_ADDRESSREPLY] = 12 }; - - /* Max length: 11 "PROTO=ICMP " */ - sb_add(m, "PROTO=ICMP "); - - if (ntohs(ih->frag_off) & IP_OFFSET) - break; - - /* Max length: 25 "INCOMPLETE [65535 bytes] " */ - ich = skb_header_pointer(skb, iphoff + ih->ihl * 4, - sizeof(_icmph), &_icmph); - if (ich == NULL) { - sb_add(m, "INCOMPLETE [%u bytes] ", - skb->len - iphoff - ih->ihl*4); - break; - } - - /* Max length: 18 "TYPE=255 CODE=255 " */ - sb_add(m, "TYPE=%u CODE=%u ", ich->type, ich->code); - - /* Max length: 25 "INCOMPLETE [65535 bytes] " */ - if (ich->type <= NR_ICMP_TYPES && - required_len[ich->type] && - skb->len-iphoff-ih->ihl*4 < required_len[ich->type]) { - sb_add(m, "INCOMPLETE [%u bytes] ", - skb->len - iphoff - ih->ihl*4); - break; - } - - switch (ich->type) { - case ICMP_ECHOREPLY: - case ICMP_ECHO: - /* Max length: 19 "ID=65535 SEQ=65535 " */ - sb_add(m, "ID=%u SEQ=%u ", - ntohs(ich->un.echo.id), - ntohs(ich->un.echo.sequence)); - break; - - case ICMP_PARAMETERPROB: - /* Max length: 14 "PARAMETER=255 " */ - sb_add(m, "PARAMETER=%u ", - ntohl(ich->un.gateway) >> 24); - break; - case ICMP_REDIRECT: - /* Max length: 24 "GATEWAY=255.255.255.255 " */ - sb_add(m, "GATEWAY=%pI4 ", &ich->un.gateway); - /* Fall through */ - case ICMP_DEST_UNREACH: - case ICMP_SOURCE_QUENCH: - case ICMP_TIME_EXCEEDED: - /* Max length: 3+maxlen */ - if (!iphoff) { /* Only recurse once. 
*/ - sb_add(m, "["); - dump_packet(m, info, skb, - iphoff + ih->ihl*4+sizeof(_icmph)); - sb_add(m, "] "); - } - - /* Max length: 10 "MTU=65535 " */ - if (ich->type == ICMP_DEST_UNREACH && - ich->code == ICMP_FRAG_NEEDED) - sb_add(m, "MTU=%u ", ntohs(ich->un.frag.mtu)); - } - break; - } - /* Max Length */ - case IPPROTO_AH: { - struct ip_auth_hdr _ahdr; - const struct ip_auth_hdr *ah; - - if (ntohs(ih->frag_off) & IP_OFFSET) - break; - - /* Max length: 9 "PROTO=AH " */ - sb_add(m, "PROTO=AH "); - - /* Max length: 25 "INCOMPLETE [65535 bytes] " */ - ah = skb_header_pointer(skb, iphoff+ih->ihl*4, - sizeof(_ahdr), &_ahdr); - if (ah == NULL) { - sb_add(m, "INCOMPLETE [%u bytes] ", - skb->len - iphoff - ih->ihl*4); - break; - } - - /* Length: 15 "SPI=0xF1234567 " */ - sb_add(m, "SPI=0x%x ", ntohl(ah->spi)); - break; - } - case IPPROTO_ESP: { - struct ip_esp_hdr _esph; - const struct ip_esp_hdr *eh; - - /* Max length: 10 "PROTO=ESP " */ - sb_add(m, "PROTO=ESP "); - - if (ntohs(ih->frag_off) & IP_OFFSET) - break; - - /* Max length: 25 "INCOMPLETE [65535 bytes] " */ - eh = skb_header_pointer(skb, iphoff+ih->ihl*4, - sizeof(_esph), &_esph); - if (eh == NULL) { - sb_add(m, "INCOMPLETE [%u bytes] ", - skb->len - iphoff - ih->ihl*4); - break; - } - - /* Length: 15 "SPI=0xF1234567 " */ - sb_add(m, "SPI=0x%x ", ntohl(eh->spi)); - break; - } - /* Max length: 10 "PROTO 255 " */ - default: - sb_add(m, "PROTO=%u ", ih->protocol); - } - - /* Max length: 15 "UID=4294967295 " */ - if ((logflags & IPT_LOG_UID) && !iphoff && skb->sk) { - read_lock_bh(&skb->sk->sk_callback_lock); - if (skb->sk->sk_socket && skb->sk->sk_socket->file) - sb_add(m, "UID=%u GID=%u ", - skb->sk->sk_socket->file->f_cred->fsuid, - skb->sk->sk_socket->file->f_cred->fsgid); - read_unlock_bh(&skb->sk->sk_callback_lock); - } - - /* Max length: 16 "MARK=0xFFFFFFFF " */ - if (!iphoff && skb->mark) - sb_add(m, "MARK=0x%x ", skb->mark); - - /* Proto Max log string length */ - /* IP: 40+46+6+11+127 = 230 */ - /* TCP: 10+max(25,20+30+13+9+32+11+127) = 252 */ - /* UDP: 10+max(25,20) = 35 */ - /* UDPLITE: 14+max(25,20) = 39 */ - /* ICMP: 11+max(25, 18+25+max(19,14,24+3+n+10,3+n+10)) = 91+n */ - /* ESP: 10+max(25)+15 = 50 */ - /* AH: 9+max(25)+15 = 49 */ - /* unknown: 10 */ - - /* (ICMP allows recursion one level deep) */ - /* maxlen = IP + ICMP + IP + max(TCP,UDP,ICMP,unknown) */ - /* maxlen = 230+ 91 + 230 + 252 = 803 */ -} - -static void dump_mac_header(struct sbuff *m, - const struct nf_loginfo *info, - const struct sk_buff *skb) -{ - struct net_device *dev = skb->dev; - unsigned int logflags = 0; - - if (info->type == NF_LOG_TYPE_LOG) - logflags = info->u.log.logflags; - - if (!(logflags & IPT_LOG_MACDECODE)) - goto fallback; - - switch (dev->type) { - case ARPHRD_ETHER: - sb_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ", - eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, - ntohs(eth_hdr(skb)->h_proto)); - return; - default: - break; - } - -fallback: - sb_add(m, "MAC="); - if (dev->hard_header_len && - skb->mac_header != skb->network_header) { - const unsigned char *p = skb_mac_header(skb); - unsigned int i; - - sb_add(m, "%02x", *p++); - for (i = 1; i < dev->hard_header_len; i++, p++) - sb_add(m, ":%02x", *p); - } - sb_add(m, " "); -} - -static struct nf_loginfo default_loginfo = { - .type = NF_LOG_TYPE_LOG, - .u = { - .log = { - .level = 5, - .logflags = NF_LOG_MASK, - }, - }, -}; - -static void -ipt_log_packet(u_int8_t pf, - unsigned int hooknum, - const struct sk_buff *skb, - const struct net_device *in, - const struct net_device *out, - const 
struct nf_loginfo *loginfo, - const char *prefix) -{ - struct sbuff *m = sb_open(); - - if (!loginfo) - loginfo = &default_loginfo; - - sb_add(m, "<%d>%sIN=%s OUT=%s ", loginfo->u.log.level, - prefix, - in ? in->name : "", - out ? out->name : ""); -#ifdef CONFIG_BRIDGE_NETFILTER - if (skb->nf_bridge) { - const struct net_device *physindev; - const struct net_device *physoutdev; - - physindev = skb->nf_bridge->physindev; - if (physindev && in != physindev) - sb_add(m, "PHYSIN=%s ", physindev->name); - physoutdev = skb->nf_bridge->physoutdev; - if (physoutdev && out != physoutdev) - sb_add(m, "PHYSOUT=%s ", physoutdev->name); - } -#endif - - if (in != NULL) - dump_mac_header(m, loginfo, skb); - - dump_packet(m, loginfo, skb, 0); - - sb_close(m); -} - -static unsigned int -log_tg(struct sk_buff *skb, const struct xt_action_param *par) -{ - const struct ipt_log_info *loginfo = par->targinfo; - struct nf_loginfo li; - - li.type = NF_LOG_TYPE_LOG; - li.u.log.level = loginfo->level; - li.u.log.logflags = loginfo->logflags; - - ipt_log_packet(NFPROTO_IPV4, par->hooknum, skb, par->in, par->out, &li, - loginfo->prefix); - return XT_CONTINUE; -} - -static int log_tg_check(const struct xt_tgchk_param *par) -{ - const struct ipt_log_info *loginfo = par->targinfo; - - if (loginfo->level >= 8) { - pr_debug("level %u >= 8\n", loginfo->level); - return -EINVAL; - } - if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') { - pr_debug("prefix is not null-terminated\n"); - return -EINVAL; - } - return 0; -} - -static struct xt_target log_tg_reg __read_mostly = { - .name = "LOG", - .family = NFPROTO_IPV4, - .target = log_tg, - .targetsize = sizeof(struct ipt_log_info), - .checkentry = log_tg_check, - .me = THIS_MODULE, -}; - -static struct nf_logger ipt_log_logger __read_mostly = { - .name = "ipt_LOG", - .logfn = &ipt_log_packet, - .me = THIS_MODULE, -}; - -static int __init log_tg_init(void) -{ - int ret; - - ret = xt_register_target(&log_tg_reg); - if (ret < 0) - return ret; - nf_log_register(NFPROTO_IPV4, &ipt_log_logger); - return 0; -} - -static void __exit log_tg_exit(void) -{ - nf_log_unregister(&ipt_log_logger); - xt_unregister_target(&log_tg_reg); -} - -module_init(log_tg_init); -module_exit(log_tg_exit); diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c index ab5b27a2916f..7cbe9cb261c2 100644 --- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c +++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c @@ -75,25 +75,31 @@ static int icmp_print_tuple(struct seq_file *s, ntohs(tuple->src.u.icmp.id)); } +static unsigned int *icmp_get_timeouts(struct net *net) +{ + return &nf_ct_icmp_timeout; +} + /* Returns verdict for packet, or -1 for invalid. */ static int icmp_packet(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, u_int8_t pf, - unsigned int hooknum) + unsigned int hooknum, + unsigned int *timeout) { /* Do not immediately delete the connection after the first successful reply to avoid excessive conntrackd traffic and also to handle correctly ICMP echo reply duplicates. */ - nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_icmp_timeout); + nf_ct_refresh_acct(ct, ctinfo, skb, *timeout); return NF_ACCEPT; } /* Called when a new connection for this protocol found. 
*/ static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb, - unsigned int dataoff) + unsigned int dataoff, unsigned int *timeouts) { static const u_int8_t valid_new[] = { [ICMP_ECHO] = 1, @@ -263,6 +269,44 @@ static int icmp_nlattr_tuple_size(void) } #endif +#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) + +#include <linux/netfilter/nfnetlink.h> +#include <linux/netfilter/nfnetlink_cttimeout.h> + +static int icmp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data) +{ + unsigned int *timeout = data; + + if (tb[CTA_TIMEOUT_ICMP_TIMEOUT]) { + *timeout = + ntohl(nla_get_be32(tb[CTA_TIMEOUT_ICMP_TIMEOUT])) * HZ; + } else { + /* Set default ICMP timeout. */ + *timeout = nf_ct_icmp_timeout; + } + return 0; +} + +static int +icmp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) +{ + const unsigned int *timeout = data; + + NLA_PUT_BE32(skb, CTA_TIMEOUT_ICMP_TIMEOUT, htonl(*timeout / HZ)); + + return 0; + +nla_put_failure: + return -ENOSPC; +} + +static const struct nla_policy +icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = { + [CTA_TIMEOUT_ICMP_TIMEOUT] = { .type = NLA_U32 }, +}; +#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ + #ifdef CONFIG_SYSCTL static struct ctl_table_header *icmp_sysctl_header; static struct ctl_table icmp_sysctl_table[] = { @@ -298,6 +342,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly = .invert_tuple = icmp_invert_tuple, .print_tuple = icmp_print_tuple, .packet = icmp_packet, + .get_timeouts = icmp_get_timeouts, .new = icmp_new, .error = icmp_error, .destroy = NULL, @@ -308,6 +353,15 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly = .nlattr_to_tuple = icmp_nlattr_to_tuple, .nla_policy = icmp_nla_policy, #endif +#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) + .ctnl_timeout = { + .nlattr_to_obj = icmp_timeout_nlattr_to_obj, + .obj_to_nlattr = icmp_timeout_obj_to_nlattr, + .nlattr_max = CTA_TIMEOUT_ICMP_MAX, + .obj_size = sizeof(unsigned int), + .nla_policy = icmp_timeout_nla_policy, + }, +#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ #ifdef CONFIG_SYSCTL .ctl_table_header = &icmp_sysctl_header, .ctl_table = icmp_sysctl_table, diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c index a708933dc230..abb52adf5acd 100644 --- a/net/ipv4/netfilter/nf_nat_core.c +++ b/net/ipv4/netfilter/nf_nat_core.c @@ -686,6 +686,11 @@ static struct pernet_operations nf_nat_net_ops = { .exit = nf_nat_net_exit, }; +static struct nf_ct_helper_expectfn follow_master_nat = { + .name = "nat-follow-master", + .expectfn = nf_nat_follow_master, +}; + static int __init nf_nat_init(void) { size_t i; @@ -717,6 +722,8 @@ static int __init nf_nat_init(void) l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET); + nf_ct_helper_expectfn_register(&follow_master_nat); + BUG_ON(nf_nat_seq_adjust_hook != NULL); RCU_INIT_POINTER(nf_nat_seq_adjust_hook, nf_nat_seq_adjust); BUG_ON(nfnetlink_parse_nat_setup_hook != NULL); @@ -736,6 +743,7 @@ static void __exit nf_nat_cleanup(void) unregister_pernet_subsys(&nf_nat_net_ops); nf_ct_l3proto_put(l3proto); nf_ct_extend_unregister(&nat_extend); + nf_ct_helper_expectfn_unregister(&follow_master_nat); RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL); RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL); RCU_INIT_POINTER(nf_ct_nat_offset, NULL); diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c index dc1dd912baf4..82536701e3a3 100644 --- a/net/ipv4/netfilter/nf_nat_h323.c +++ b/net/ipv4/netfilter/nf_nat_h323.c @@ -568,6 +568,16 @@ static int 
nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct, return 0; } +static struct nf_ct_helper_expectfn q931_nat = { + .name = "Q.931", + .expectfn = ip_nat_q931_expect, +}; + +static struct nf_ct_helper_expectfn callforwarding_nat = { + .name = "callforwarding", + .expectfn = ip_nat_callforwarding_expect, +}; + /****************************************************************************/ static int __init init(void) { @@ -590,6 +600,8 @@ static int __init init(void) RCU_INIT_POINTER(nat_h245_hook, nat_h245); RCU_INIT_POINTER(nat_callforwarding_hook, nat_callforwarding); RCU_INIT_POINTER(nat_q931_hook, nat_q931); + nf_ct_helper_expectfn_register(&q931_nat); + nf_ct_helper_expectfn_register(&callforwarding_nat); return 0; } @@ -605,6 +617,8 @@ static void __exit fini(void) RCU_INIT_POINTER(nat_h245_hook, NULL); RCU_INIT_POINTER(nat_callforwarding_hook, NULL); RCU_INIT_POINTER(nat_q931_hook, NULL); + nf_ct_helper_expectfn_unregister(&q931_nat); + nf_ct_helper_expectfn_unregister(&callforwarding_nat); synchronize_rcu(); } diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c index d0319f96269f..57932c43960e 100644 --- a/net/ipv4/netfilter/nf_nat_sip.c +++ b/net/ipv4/netfilter/nf_nat_sip.c @@ -526,6 +526,11 @@ err1: return NF_DROP; } +static struct nf_ct_helper_expectfn sip_nat = { + .name = "sip", + .expectfn = ip_nat_sip_expected, +}; + static void __exit nf_nat_sip_fini(void) { RCU_INIT_POINTER(nf_nat_sip_hook, NULL); @@ -535,6 +540,7 @@ static void __exit nf_nat_sip_fini(void) RCU_INIT_POINTER(nf_nat_sdp_port_hook, NULL); RCU_INIT_POINTER(nf_nat_sdp_session_hook, NULL); RCU_INIT_POINTER(nf_nat_sdp_media_hook, NULL); + nf_ct_helper_expectfn_unregister(&sip_nat); synchronize_rcu(); } @@ -554,6 +560,7 @@ static int __init nf_nat_sip_init(void) RCU_INIT_POINTER(nf_nat_sdp_port_hook, ip_nat_sdp_port); RCU_INIT_POINTER(nf_nat_sdp_session_hook, ip_nat_sdp_session); RCU_INIT_POINTER(nf_nat_sdp_media_hook, ip_nat_sdp_media); + nf_ct_helper_expectfn_register(&sip_nat); return 0; } diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index aea5a199c37a..ab6b36e6da15 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -156,7 +156,7 @@ static struct sock *ping_v4_lookup(struct net *net, __be32 saddr, __be32 daddr, struct hlist_nulls_node *hnode; pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n", - (int)ident, &daddr, dif); + (int)ident, &daddr, dif); read_lock_bh(&ping_table.lock); ping_portaddr_for_each_entry(sk, hnode, hslot) { @@ -229,7 +229,7 @@ static int ping_init_sock(struct sock *sk) static void ping_close(struct sock *sk, long timeout) { pr_debug("ping_close(sk=%p,sk->num=%u)\n", - inet_sk(sk), inet_sk(sk)->inet_num); + inet_sk(sk), inet_sk(sk)->inet_num); pr_debug("isk->refcnt = %d\n", sk->sk_refcnt.counter); sk_common_release(sk); @@ -252,7 +252,7 @@ static int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) return -EINVAL; pr_debug("ping_v4_bind(sk=%p,sa_addr=%08x,sa_port=%d)\n", - sk, addr->sin_addr.s_addr, ntohs(addr->sin_port)); + sk, addr->sin_addr.s_addr, ntohs(addr->sin_port)); chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr); if (addr->sin_addr.s_addr == htonl(INADDR_ANY)) @@ -280,9 +280,9 @@ static int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) } pr_debug("after bind(): num = %d, daddr = %pI4, dif = %d\n", - (int)isk->inet_num, - &isk->inet_rcv_saddr, - (int)sk->sk_bound_dev_if); + (int)isk->inet_num, + &isk->inet_rcv_saddr, + (int)sk->sk_bound_dev_if); err = 0; if (isk->inet_rcv_saddr) @@ 
-335,7 +335,7 @@ void ping_err(struct sk_buff *skb, u32 info) return; pr_debug("ping_err(type=%04x,code=%04x,id=%04x,seq=%04x)\n", type, - code, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence)); + code, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence)); sk = ping_v4_lookup(net, iph->daddr, iph->saddr, ntohs(icmph->un.echo.id), skb->dev->ifindex); @@ -556,7 +556,8 @@ static int ping_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, ipc.oif = inet->mc_index; if (!saddr) saddr = inet->mc_addr; - } + } else if (!ipc.oif) + ipc.oif = inet->uc_index; flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, sk->sk_protocol, @@ -630,6 +631,7 @@ static int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, pr_debug("ping_recvmsg(sk=%p,sk->num=%u)\n", isk, isk->inet_num); + err = -EOPNOTSUPP; if (flags & MSG_OOB) goto out; @@ -677,7 +679,7 @@ out: static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n", - inet_sk(sk), inet_sk(sk)->inet_num, skb); + inet_sk(sk), inet_sk(sk)->inet_num, skb); if (sock_queue_rcv_skb(sk, skb) < 0) { kfree_skb(skb); pr_debug("ping_queue_rcv_skb -> failed\n"); @@ -703,7 +705,7 @@ void ping_rcv(struct sk_buff *skb) /* We assume the packet has already been checked by icmp_rcv */ pr_debug("ping_rcv(skb=%p,id=%04x,seq=%04x)\n", - skb, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence)); + skb, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence)); /* Push ICMP header back */ skb_push(skb, skb->data - (u8 *)icmph); diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 6afc807ee2ad..8af0d44e4e22 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -256,6 +256,8 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW), SNMP_MIB_ITEM("TCPReqQFullDoCookies", LINUX_MIB_TCPREQQFULLDOCOOKIES), SNMP_MIB_ITEM("TCPReqQFullDrop", LINUX_MIB_TCPREQQFULLDROP), + SNMP_MIB_ITEM("TCPRetransFail", LINUX_MIB_TCPRETRANSFAIL), + SNMP_MIB_ITEM("TCPRcvCoalesce", LINUX_MIB_TCPRCVCOALESCE), SNMP_MIB_SENTINEL }; diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 3ccda5ae8a27..bbd604c68e68 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -491,11 +491,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, if (msg->msg_namelen < sizeof(*usin)) goto out; if (usin->sin_family != AF_INET) { - static int complained; - if (!complained++) - printk(KERN_INFO "%s forgot to set AF_INET in " - "raw sendmsg. Fix it!\n", - current->comm); + pr_info_once("%s: %s forgot to set AF_INET. Fix it!\n", + __func__, current->comm); err = -EAFNOSUPPORT; if (usin->sin_family) goto out; @@ -563,7 +560,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, ipc.oif = inet->mc_index; if (!saddr) saddr = inet->mc_addr; - } + } else if (!ipc.oif) + ipc.oif = inet->uc_index; flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE, diff --git a/net/ipv4/route.c b/net/ipv4/route.c index bcacf54e5418..12ccf880eb88 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -62,6 +62,8 @@ * 2 of the License, or (at your option) any later version. 
*/ +#define pr_fmt(fmt) "IPv4: " fmt + #include <linux/module.h> #include <asm/uaccess.h> #include <asm/system.h> @@ -132,7 +134,6 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ; static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20; static int ip_rt_min_advmss __read_mostly = 256; static int rt_chain_length_max __read_mostly = 20; -static int redirect_genid; static struct delayed_work expires_work; static unsigned long expires_ljiffies; @@ -937,7 +938,7 @@ static void rt_cache_invalidate(struct net *net) get_random_bytes(&shuffle, sizeof(shuffle)); atomic_add(shuffle + 1U, &net->ipv4.rt_genid); - redirect_genid++; + inetpeer_invalidate_tree(AF_INET); } /* @@ -960,7 +961,7 @@ void rt_cache_flush_batch(struct net *net) static void rt_emergency_hash_rebuild(struct net *net) { if (net_ratelimit()) - printk(KERN_WARNING "Route hash chain too long!\n"); + pr_warn("Route hash chain too long!\n"); rt_cache_invalidate(net); } @@ -1084,7 +1085,7 @@ static int rt_garbage_collect(struct dst_ops *ops) if (dst_entries_get_slow(&ipv4_dst_ops) < ip_rt_max_size) goto out; if (net_ratelimit()) - printk(KERN_WARNING "dst cache overflow\n"); + pr_warn("dst cache overflow\n"); RT_CACHE_STAT_INC(gc_dst_overflow); return 1; @@ -1117,12 +1118,17 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const vo static const __be32 inaddr_any = 0; struct net_device *dev = dst->dev; const __be32 *pkey = daddr; + const struct rtable *rt; struct neighbour *n; + rt = (const struct rtable *) dst; + if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT)) pkey = &inaddr_any; + else if (rt->rt_gateway) + pkey = (const __be32 *) &rt->rt_gateway; - n = __ipv4_neigh_lookup(&arp_tbl, dev, *(__force u32 *)pkey); + n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey); if (n) return n; return neigh_create(&arp_tbl, pkey, dev); @@ -1177,8 +1183,7 @@ restart: int err = rt_bind_neighbour(rt); if (err) { if (net_ratelimit()) - printk(KERN_WARNING - "Neighbour table failure & not caching routes.\n"); + pr_warn("Neighbour table failure & not caching routes\n"); ip_rt_put(rt); return ERR_PTR(err); } @@ -1254,7 +1259,7 @@ restart: struct net *net = dev_net(rt->dst.dev); int num = ++net->ipv4.current_rt_cache_rebuild_count; if (!rt_caching(net)) { - printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n", + pr_warn("%s: %d rebuilds is over limit, route caching disabled\n", rt->dst.dev->name, num); } rt_emergency_hash_rebuild(net); @@ -1295,7 +1300,7 @@ restart: } if (net_ratelimit()) - printk(KERN_WARNING "ipv4: Neighbour table overflow.\n"); + pr_warn("Neighbour table overflow\n"); rt_drop(rt); return ERR_PTR(-ENOBUFS); } @@ -1485,10 +1490,8 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, peer = rt->peer; if (peer) { - if (peer->redirect_learned.a4 != new_gw || - peer->redirect_genid != redirect_genid) { + if (peer->redirect_learned.a4 != new_gw) { peer->redirect_learned.a4 = new_gw; - peer->redirect_genid = redirect_genid; atomic_inc(&__rt_peer_genid); } check_peer_redir(&rt->dst, peer); @@ -1501,10 +1504,10 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, reject_redirect: #ifdef CONFIG_IP_ROUTE_VERBOSE if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) - printk(KERN_INFO "Redirect from %pI4 on %s about %pI4 ignored.\n" + pr_info("Redirect from %pI4 on %s about %pI4 ignored\n" " Advised path = %pI4 -> %pI4\n", - &old_gw, dev->name, &new_gw, - &saddr, &daddr); + &old_gw, dev->name, &new_gw, + &saddr, &daddr); #endif ; } @@ -1616,8 +1619,8 @@ void 
ip_rt_send_redirect(struct sk_buff *skb) if (log_martians && peer->rate_tokens == ip_rt_redirect_number && net_ratelimit()) - printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n", - &ip_hdr(skb)->saddr, rt->rt_iif, + pr_warn("host %pI4/if%d ignores redirects for %pI4 to %pI4\n", + &ip_hdr(skb)->saddr, rt->rt_iif, &rt->rt_dst, &rt->rt_gateway); #endif } @@ -1793,8 +1796,6 @@ static void ipv4_validate_peer(struct rtable *rt) if (peer) { check_peer_pmtu(&rt->dst, peer); - if (peer->redirect_genid != redirect_genid) - peer->redirect_learned.a4 = 0; if (peer->redirect_learned.a4 && peer->redirect_learned.a4 != rt->rt_gateway) check_peer_redir(&rt->dst, peer); @@ -1958,8 +1959,7 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4, dst_init_metrics(&rt->dst, peer->metrics, false); check_peer_pmtu(&rt->dst, peer); - if (peer->redirect_genid != redirect_genid) - peer->redirect_learned.a4 = 0; + if (peer->redirect_learned.a4 && peer->redirect_learned.a4 != rt->rt_gateway) { rt->rt_gateway = peer->redirect_learned.a4; @@ -2106,18 +2106,13 @@ static void ip_handle_martian_source(struct net_device *dev, * RFC1812 recommendation, if source is martian, * the only hint is MAC header. */ - printk(KERN_WARNING "martian source %pI4 from %pI4, on dev %s\n", + pr_warn("martian source %pI4 from %pI4, on dev %s\n", &daddr, &saddr, dev->name); if (dev->hard_header_len && skb_mac_header_was_set(skb)) { - int i; - const unsigned char *p = skb_mac_header(skb); - printk(KERN_WARNING "ll header: "); - for (i = 0; i < dev->hard_header_len; i++, p++) { - printk("%02x", *p); - if (i < (dev->hard_header_len - 1)) - printk(":"); - } - printk("\n"); + print_hex_dump(KERN_WARNING, "ll header: ", + DUMP_PREFIX_OFFSET, 16, 1, + skb_mac_header(skb), + dev->hard_header_len, true); } } #endif @@ -2141,8 +2136,7 @@ static int __mkroute_input(struct sk_buff *skb, out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res)); if (out_dev == NULL) { if (net_ratelimit()) - printk(KERN_CRIT "Bug in ip_route_input" \ - "_slow(). Please, report\n"); + pr_crit("Bug in ip_route_input_slow(). Please report.\n"); return -EINVAL; } @@ -2414,7 +2408,7 @@ martian_destination: RT_CACHE_STAT_INC(in_martian_dst); #ifdef CONFIG_IP_ROUTE_VERBOSE if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) - printk(KERN_WARNING "martian destination %pI4 from %pI4, dev %s\n", + pr_warn("martian destination %pI4 from %pI4, dev %s\n", &daddr, &saddr, dev->name); #endif @@ -3491,7 +3485,7 @@ int __init ip_rt_init(void) net_random() % ip_rt_gc_interval + ip_rt_gc_interval); if (ip_rt_proc_init()) - printk(KERN_ERR "Unable to create route proc files\n"); + pr_err("Unable to create route proc files\n"); #ifdef CONFIG_XFRM xfrm_init(); xfrm4_init(ip_rt_max_size); diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 51fdbb490437..eab2a7fb15d1 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -278,6 +278,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, struct rtable *rt; __u8 rcv_wscale; bool ecn_ok = false; + struct flowi4 fl4; if (!sysctl_tcp_syncookies || !th->ack || th->rst) goto out; @@ -346,20 +347,16 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, * hasn't changed since we received the original syn, but I see * no easy way to do this. */ - { - struct flowi4 fl4; - - flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk), - RT_SCOPE_UNIVERSE, IPPROTO_TCP, - inet_sk_flowi_flags(sk), - (opt && opt->srr) ? 
opt->faddr : ireq->rmt_addr, - ireq->loc_addr, th->source, th->dest); - security_req_classify_flow(req, flowi4_to_flowi(&fl4)); - rt = ip_route_output_key(sock_net(sk), &fl4); - if (IS_ERR(rt)) { - reqsk_free(req); - goto out; - } + flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk), + RT_SCOPE_UNIVERSE, IPPROTO_TCP, + inet_sk_flowi_flags(sk), + (opt && opt->srr) ? opt->faddr : ireq->rmt_addr, + ireq->loc_addr, th->source, th->dest); + security_req_classify_flow(req, flowi4_to_flowi(&fl4)); + rt = ip_route_output_key(sock_net(sk), &fl4); + if (IS_ERR(rt)) { + reqsk_free(req); + goto out; } /* Try to redo what tcp_v4_send_synack did. */ @@ -373,5 +370,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, ireq->rcv_wscale = rcv_wscale; ret = get_cookie_sock(sk, skb, req, &rt->dst); + /* ip_queue_xmit() depends on our flow being setup + * Normal sockets get it right from inet_csk_route_child_sock() + */ + if (ret) + inet_sk(ret)->cork.fl.u.ip4 = fl4; out: return ret; } diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 4cb9cd2f2c39..7a7724da9bff 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -778,7 +778,6 @@ EXPORT_SYMBOL_GPL(net_ipv4_ctl_path); static __net_init int ipv4_sysctl_init_net(struct net *net) { struct ctl_table *table; - unsigned long limit; table = ipv4_net_table; if (!net_eq(net, &init_net)) { @@ -815,11 +814,6 @@ static __net_init int ipv4_sysctl_init_net(struct net *net) net->ipv4.sysctl_rt_cache_rebuild_count = 4; tcp_init_mem(net); - limit = nr_free_buffer_pages() / 8; - limit = max(limit, 128UL); - net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3; - net->ipv4.sysctl_tcp_mem[1] = limit; - net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2; net->ipv4.ipv4_hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 06373b4a449a..cfd7edda0a8e 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -245,6 +245,8 @@ * TCP_CLOSE socket is finished */ +#define pr_fmt(fmt) "TCP: " fmt + #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> @@ -1675,7 +1677,8 @@ do_prequeue: if (tp->ucopy.dma_cookie < 0) { - printk(KERN_ALERT "dma_cookie < 0\n"); + pr_alert("%s: dma_cookie < 0\n", + __func__); /* Exception. Bailout! 
*/ if (!copied) @@ -1876,6 +1879,20 @@ void tcp_shutdown(struct sock *sk, int how) } EXPORT_SYMBOL(tcp_shutdown); +bool tcp_check_oom(struct sock *sk, int shift) +{ + bool too_many_orphans, out_of_socket_memory; + + too_many_orphans = tcp_too_many_orphans(sk, shift); + out_of_socket_memory = tcp_out_of_memory(sk); + + if (too_many_orphans && net_ratelimit()) + pr_info("too many orphaned sockets\n"); + if (out_of_socket_memory && net_ratelimit()) + pr_info("out of memory -- consider tuning tcp_mem\n"); + return too_many_orphans || out_of_socket_memory; +} + void tcp_close(struct sock *sk, long timeout) { struct sk_buff *skb; @@ -2015,10 +2032,7 @@ adjudge_to_death: } if (sk->sk_state != TCP_CLOSE) { sk_mem_reclaim(sk); - if (tcp_too_many_orphans(sk, 0)) { - if (net_ratelimit()) - printk(KERN_INFO "TCP: too many of orphaned " - "sockets\n"); + if (tcp_check_oom(sk, 0)) { tcp_set_state(sk, TCP_CLOSE); tcp_send_active_reset(sk, GFP_ATOMIC); NET_INC_STATS_BH(sock_net(sk), @@ -3218,7 +3232,6 @@ __setup("thash_entries=", set_thash_entries); void tcp_init_mem(struct net *net) { - /* Set per-socket limits to no more than 1/128 the pressure threshold */ unsigned long limit = nr_free_buffer_pages() / 8; limit = max(limit, 128UL); net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3; @@ -3230,7 +3243,8 @@ void __init tcp_init(void) { struct sk_buff *skb = NULL; unsigned long limit; - int i, max_share, cnt; + int max_share, cnt; + unsigned int i; unsigned long jiffy = jiffies; BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb)); @@ -3273,7 +3287,7 @@ void __init tcp_init(void) &tcp_hashinfo.bhash_size, NULL, 64 * 1024); - tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size; + tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size; for (i = 0; i < tcp_hashinfo.bhash_size; i++) { spin_lock_init(&tcp_hashinfo.bhash[i].lock); INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); @@ -3287,7 +3301,8 @@ void __init tcp_init(void) sysctl_max_syn_backlog = max(128, cnt / 256); tcp_init_mem(&init_net); - limit = nr_free_buffer_pages() / 8; + /* Set per-socket limits to no more than 1/128 the pressure threshold */ + limit = nr_free_buffer_pages() << (PAGE_SHIFT - 10); limit = max(limit, 128UL); max_share = min(4UL*1024*1024, limit); @@ -3299,9 +3314,8 @@ void __init tcp_init(void) sysctl_tcp_rmem[1] = 87380; sysctl_tcp_rmem[2] = max(87380, max_share); - printk(KERN_INFO "TCP: Hash tables configured " - "(established %u bind %u)\n", - tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); + pr_info("Hash tables configured (established %u bind %u)\n", + tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); tcp_register_congestion_control(&tcp_reno); diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index fc6d475f488f..272a84593c85 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -6,6 +6,8 @@ * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org> */ +#define pr_fmt(fmt) "TCP: " fmt + #include <linux/module.h> #include <linux/mm.h> #include <linux/types.h> @@ -41,18 +43,17 @@ int tcp_register_congestion_control(struct tcp_congestion_ops *ca) /* all algorithms must implement ssthresh and cong_avoid ops */ if (!ca->ssthresh || !ca->cong_avoid) { - printk(KERN_ERR "TCP %s does not implement required ops\n", - ca->name); + pr_err("%s does not implement required ops\n", ca->name); return -EINVAL; } spin_lock(&tcp_cong_list_lock); if (tcp_ca_find(ca->name)) { - printk(KERN_NOTICE "TCP %s already registered\n", ca->name); + pr_notice("%s already registered\n", ca->name); ret = -EEXIST; } else { 
list_add_tail_rcu(&ca->list, &tcp_cong_list); - printk(KERN_INFO "TCP %s registered\n", ca->name); + pr_info("%s registered\n", ca->name); } spin_unlock(&tcp_cong_list_lock); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 976034f82320..e886e2f7fa8d 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -61,6 +61,8 @@ * Pasi Sarolahti: F-RTO for dealing with spurious RTOs */ +#define pr_fmt(fmt) "TCP: " fmt + #include <linux/mm.h> #include <linux/slab.h> #include <linux/module.h> @@ -1307,25 +1309,26 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, return in_sack; } -static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk, - struct tcp_sacktag_state *state, +/* Mark the given newly-SACKed range as such, adjusting counters and hints. */ +static u8 tcp_sacktag_one(struct sock *sk, + struct tcp_sacktag_state *state, u8 sacked, + u32 start_seq, u32 end_seq, int dup_sack, int pcount) { struct tcp_sock *tp = tcp_sk(sk); - u8 sacked = TCP_SKB_CB(skb)->sacked; int fack_count = state->fack_count; /* Account D-SACK for retransmitted packet. */ if (dup_sack && (sacked & TCPCB_RETRANS)) { if (tp->undo_marker && tp->undo_retrans && - after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker)) + after(end_seq, tp->undo_marker)) tp->undo_retrans--; if (sacked & TCPCB_SACKED_ACKED) state->reord = min(fack_count, state->reord); } /* Nothing to do; acked frame is about to be dropped (was ACKed). */ - if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) + if (!after(end_seq, tp->snd_una)) return sacked; if (!(sacked & TCPCB_SACKED_ACKED)) { @@ -1344,13 +1347,13 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk, /* New sack for not retransmitted frame, * which was in hole. It is reordering. */ - if (before(TCP_SKB_CB(skb)->seq, + if (before(start_seq, tcp_highest_sack_seq(tp))) state->reord = min(fack_count, state->reord); /* SACK enhanced F-RTO (RFC4138; Appendix B) */ - if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark)) + if (!after(end_seq, tp->frto_highmark)) state->flag |= FLAG_ONLY_ORIG_SACKED; } @@ -1368,8 +1371,7 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk, /* Lost marker hint past SACKed? Tweak RFC3517 cnt */ if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) && - before(TCP_SKB_CB(skb)->seq, - TCP_SKB_CB(tp->lost_skb_hint)->seq)) + before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq)) tp->lost_cnt_hint += pcount; if (fack_count > tp->fackets_out) @@ -1388,6 +1390,9 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk, return sacked; } +/* Shift newly-SACKed bytes from this skb to the immediately previous + * already-SACKed sk_buff. Mark the newly-SACKed bytes as such. + */ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, struct tcp_sacktag_state *state, unsigned int pcount, int shifted, int mss, @@ -1395,9 +1400,20 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *prev = tcp_write_queue_prev(sk, skb); + u32 start_seq = TCP_SKB_CB(skb)->seq; /* start of newly-SACKed */ + u32 end_seq = start_seq + shifted; /* end of newly-SACKed */ BUG_ON(!pcount); + /* Adjust counters and hints for the newly sacked sequence + * range but discard the return value since prev is already + * marked. We must tag the range first because the seq + * advancement below implicitly advances + * tcp_highest_sack_seq() when skb is highest_sack. 
+ */ + tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked, + start_seq, end_seq, dup_sack, pcount); + if (skb == tp->lost_skb_hint) tp->lost_cnt_hint += pcount; @@ -1424,9 +1440,6 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, skb_shinfo(skb)->gso_type = 0; } - /* We discard results */ - tcp_sacktag_one(skb, sk, state, dup_sack, pcount); - /* Difference in this won't matter, both ACKed by the same cumul. ACK */ TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS); @@ -1574,6 +1587,10 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, } } + /* tcp_sacktag_one() won't SACK-tag ranges below snd_una */ + if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una)) + goto fallback; + if (!skb_shift(prev, skb, len)) goto fallback; if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack)) @@ -1664,10 +1681,14 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, break; if (in_sack) { - TCP_SKB_CB(skb)->sacked = tcp_sacktag_one(skb, sk, - state, - dup_sack, - tcp_skb_pcount(skb)); + TCP_SKB_CB(skb)->sacked = + tcp_sacktag_one(sk, + state, + TCP_SKB_CB(skb)->sacked, + TCP_SKB_CB(skb)->seq, + TCP_SKB_CB(skb)->end_seq, + dup_sack, + tcp_skb_pcount(skb)); if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) @@ -2554,6 +2575,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head) if (cnt > packets) { if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) || + (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) || (oldcnt >= packets)) break; @@ -3847,9 +3869,9 @@ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *o opt_rx->wscale_ok = 1; if (snd_wscale > 14) { if (net_ratelimit()) - printk(KERN_INFO "tcp_parse_options: Illegal window " - "scaling value %d >14 received.\n", - snd_wscale); + pr_info("%s: Illegal window scaling value %d >14 received\n", + __func__, + snd_wscale); snd_wscale = 14; } opt_rx->snd_wscale = snd_wscale; @@ -4171,7 +4193,7 @@ static void tcp_fin(struct sock *sk) /* Only TCP_LISTEN and TCP_CLOSE are left, in these * cases we should never reach this piece of code. */ - printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n", + pr_err("%s: Impossible, sk->sk_state=%d\n", __func__, sk->sk_state); break; } @@ -4424,6 +4446,137 @@ static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size) return 0; } +static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) +{ + struct tcp_sock *tp = tcp_sk(sk); + struct sk_buff *skb1; + u32 seq, end_seq; + + TCP_ECN_check_ce(tp, skb); + + if (tcp_try_rmem_schedule(sk, skb->truesize)) { + /* TODO: should increment a counter */ + __kfree_skb(skb); + return; + } + + /* Disable header prediction. */ + tp->pred_flags = 0; + inet_csk_schedule_ack(sk); + + SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", + tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); + + skb1 = skb_peek_tail(&tp->out_of_order_queue); + if (!skb1) { + /* Initial out of order segment, build 1 SACK. */ + if (tcp_is_sack(tp)) { + tp->rx_opt.num_sacks = 1; + tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq; + tp->selective_acks[0].end_seq = + TCP_SKB_CB(skb)->end_seq; + } + __skb_queue_head(&tp->out_of_order_queue, skb); + goto end; + } + + seq = TCP_SKB_CB(skb)->seq; + end_seq = TCP_SKB_CB(skb)->end_seq; + + if (seq == TCP_SKB_CB(skb1)->end_seq) { + /* Packets in ofo can stay in queue a long time. 
+ * Better try to coalesce them right now + * to avoid future tcp_collapse_ofo_queue(), + * probably the most expensive function in tcp stack. + */ + if (skb->len <= skb_tailroom(skb1) && !tcp_hdr(skb)->fin) { + NET_INC_STATS_BH(sock_net(sk), + LINUX_MIB_TCPRCVCOALESCE); + BUG_ON(skb_copy_bits(skb, 0, + skb_put(skb1, skb->len), + skb->len)); + TCP_SKB_CB(skb1)->end_seq = end_seq; + TCP_SKB_CB(skb1)->ack_seq = TCP_SKB_CB(skb)->ack_seq; + __kfree_skb(skb); + skb = NULL; + } else { + __skb_queue_after(&tp->out_of_order_queue, skb1, skb); + } + + if (!tp->rx_opt.num_sacks || + tp->selective_acks[0].end_seq != seq) + goto add_sack; + + /* Common case: data arrive in order after hole. */ + tp->selective_acks[0].end_seq = end_seq; + goto end; + } + + /* Find place to insert this segment. */ + while (1) { + if (!after(TCP_SKB_CB(skb1)->seq, seq)) + break; + if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) { + skb1 = NULL; + break; + } + skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1); + } + + /* Do skb overlap to previous one? */ + if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) { + if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { + /* All the bits are present. Drop. */ + __kfree_skb(skb); + skb = NULL; + tcp_dsack_set(sk, seq, end_seq); + goto add_sack; + } + if (after(seq, TCP_SKB_CB(skb1)->seq)) { + /* Partial overlap. */ + tcp_dsack_set(sk, seq, + TCP_SKB_CB(skb1)->end_seq); + } else { + if (skb_queue_is_first(&tp->out_of_order_queue, + skb1)) + skb1 = NULL; + else + skb1 = skb_queue_prev( + &tp->out_of_order_queue, + skb1); + } + } + if (!skb1) + __skb_queue_head(&tp->out_of_order_queue, skb); + else + __skb_queue_after(&tp->out_of_order_queue, skb1, skb); + + /* And clean segments covered by new one as whole. */ + while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) { + skb1 = skb_queue_next(&tp->out_of_order_queue, skb); + + if (!after(end_seq, TCP_SKB_CB(skb1)->seq)) + break; + if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { + tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, + end_seq); + break; + } + __skb_unlink(skb1, &tp->out_of_order_queue); + tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, + TCP_SKB_CB(skb1)->end_seq); + __kfree_skb(skb1); + } + +add_sack: + if (tcp_is_sack(tp)) + tcp_sack_new_ofo_skb(sk, seq, end_seq); +end: + if (skb) + skb_set_owner_r(skb, sk); +} + + static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) { const struct tcphdr *th = tcp_hdr(skb); @@ -4539,105 +4692,7 @@ drop: goto queue_and_out; } - TCP_ECN_check_ce(tp, skb); - - if (tcp_try_rmem_schedule(sk, skb->truesize)) - goto drop; - - /* Disable header prediction. */ - tp->pred_flags = 0; - inet_csk_schedule_ack(sk); - - SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n", - tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); - - skb_set_owner_r(skb, sk); - - if (!skb_peek(&tp->out_of_order_queue)) { - /* Initial out of order segment, build 1 SACK. */ - if (tcp_is_sack(tp)) { - tp->rx_opt.num_sacks = 1; - tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq; - tp->selective_acks[0].end_seq = - TCP_SKB_CB(skb)->end_seq; - } - __skb_queue_head(&tp->out_of_order_queue, skb); - } else { - struct sk_buff *skb1 = skb_peek_tail(&tp->out_of_order_queue); - u32 seq = TCP_SKB_CB(skb)->seq; - u32 end_seq = TCP_SKB_CB(skb)->end_seq; - - if (seq == TCP_SKB_CB(skb1)->end_seq) { - __skb_queue_after(&tp->out_of_order_queue, skb1, skb); - - if (!tp->rx_opt.num_sacks || - tp->selective_acks[0].end_seq != seq) - goto add_sack; - - /* Common case: data arrive in order after hole. 
*/ - tp->selective_acks[0].end_seq = end_seq; - return; - } - - /* Find place to insert this segment. */ - while (1) { - if (!after(TCP_SKB_CB(skb1)->seq, seq)) - break; - if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) { - skb1 = NULL; - break; - } - skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1); - } - - /* Do skb overlap to previous one? */ - if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) { - if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { - /* All the bits are present. Drop. */ - __kfree_skb(skb); - tcp_dsack_set(sk, seq, end_seq); - goto add_sack; - } - if (after(seq, TCP_SKB_CB(skb1)->seq)) { - /* Partial overlap. */ - tcp_dsack_set(sk, seq, - TCP_SKB_CB(skb1)->end_seq); - } else { - if (skb_queue_is_first(&tp->out_of_order_queue, - skb1)) - skb1 = NULL; - else - skb1 = skb_queue_prev( - &tp->out_of_order_queue, - skb1); - } - } - if (!skb1) - __skb_queue_head(&tp->out_of_order_queue, skb); - else - __skb_queue_after(&tp->out_of_order_queue, skb1, skb); - - /* And clean segments covered by new one as whole. */ - while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) { - skb1 = skb_queue_next(&tp->out_of_order_queue, skb); - - if (!after(end_seq, TCP_SKB_CB(skb1)->seq)) - break; - if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { - tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, - end_seq); - break; - } - __skb_unlink(skb1, &tp->out_of_order_queue); - tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, - TCP_SKB_CB(skb1)->end_seq); - __kfree_skb(skb1); - } - -add_sack: - if (tcp_is_sack(tp)) - tcp_sack_new_ofo_skb(sk, seq, end_seq); - } + tcp_data_queue_ofo(sk, skb); } static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 337ba4cca052..3a25cf743f8b 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -50,6 +50,7 @@ * a single port at the same time. */ +#define pr_fmt(fmt) "TCP: " fmt #include <linux/bottom_half.h> #include <linux/types.h> @@ -90,16 +91,8 @@ EXPORT_SYMBOL(sysctl_tcp_low_latency); #ifdef CONFIG_TCP_MD5SIG -static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, - __be32 addr); -static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key, +static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, __be32 daddr, __be32 saddr, const struct tcphdr *th); -#else -static inline -struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr) -{ - return NULL; -} #endif struct inet_hashinfo tcp_hashinfo; @@ -601,6 +594,10 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) struct ip_reply_arg arg; #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *key; + const __u8 *hash_location = NULL; + unsigned char newhash[16]; + int genhash; + struct sock *sk1 = NULL; #endif struct net *net; @@ -631,7 +628,36 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) arg.iov[0].iov_len = sizeof(rep.th); #ifdef CONFIG_TCP_MD5SIG - key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->saddr) : NULL; + hash_location = tcp_parse_md5sig_option(th); + if (!sk && hash_location) { + /* + * active side is lost. Try to find listening socket through + * source port, and then find md5 key through listening socket. + * we are not loose security here: + * Incoming packet is checked with md5 hash with finding key, + * no RST generated if md5 hash doesn't match. 
+ */ + sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev), + &tcp_hashinfo, ip_hdr(skb)->daddr, + ntohs(th->source), inet_iif(skb)); + /* don't send rst if it can't find key */ + if (!sk1) + return; + rcu_read_lock(); + key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *) + &ip_hdr(skb)->saddr, AF_INET); + if (!key) + goto release_sk1; + + genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb); + if (genhash || memcmp(hash_location, newhash, 16) != 0) + goto release_sk1; + } else { + key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *) + &ip_hdr(skb)->saddr, + AF_INET) : NULL; + } + if (key) { rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | @@ -651,6 +677,11 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) arg.iov[0].iov_len, IPPROTO_TCP, 0); arg.csumoffset = offsetof(struct tcphdr, check) / 2; arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0; + /* When socket is gone, all binding information is lost. + * routing might fail in this case. using iif for oif to + * make sure we can deliver it + */ + arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb); net = dev_net(skb_dst(skb)->dev); arg.tos = ip_hdr(skb)->tos; @@ -659,6 +690,14 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS); + +#ifdef CONFIG_TCP_MD5SIG +release_sk1: + if (sk1) { + rcu_read_unlock(); + sock_put(sk1); + } +#endif } /* The code following below sending ACKs in SYN-RECV and TIME-WAIT states @@ -759,7 +798,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd, req->ts_recent, 0, - tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr), + tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr, + AF_INET), inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0, ip_hdr(skb)->tos); } @@ -837,8 +877,7 @@ int tcp_syn_flood_action(struct sock *sk, lopt = inet_csk(sk)->icsk_accept_queue.listen_opt; if (!lopt->synflood_warned) { lopt->synflood_warned = 1; - pr_info("%s: Possible SYN flooding on port %d. %s. " - " Check SNMP counters.\n", + pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n", proto, ntohs(tcp_hdr(skb)->dest), msg); } return want_cookie; @@ -876,153 +915,138 @@ static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk, */ /* Find the Key structure for an address. 
*/ -static struct tcp_md5sig_key * - tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr) +struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk, + const union tcp_md5_addr *addr, + int family) { struct tcp_sock *tp = tcp_sk(sk); - int i; - - if (!tp->md5sig_info || !tp->md5sig_info->entries4) + struct tcp_md5sig_key *key; + struct hlist_node *pos; + unsigned int size = sizeof(struct in_addr); + struct tcp_md5sig_info *md5sig; + + /* caller either holds rcu_read_lock() or socket lock */ + md5sig = rcu_dereference_check(tp->md5sig_info, + sock_owned_by_user(sk) || + lockdep_is_held(&sk->sk_lock.slock)); + if (!md5sig) return NULL; - for (i = 0; i < tp->md5sig_info->entries4; i++) { - if (tp->md5sig_info->keys4[i].addr == addr) - return &tp->md5sig_info->keys4[i].base; +#if IS_ENABLED(CONFIG_IPV6) + if (family == AF_INET6) + size = sizeof(struct in6_addr); +#endif + hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) { + if (key->family != family) + continue; + if (!memcmp(&key->addr, addr, size)) + return key; } return NULL; } +EXPORT_SYMBOL(tcp_md5_do_lookup); struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk, struct sock *addr_sk) { - return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr); + union tcp_md5_addr *addr; + + addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr; + return tcp_md5_do_lookup(sk, addr, AF_INET); } EXPORT_SYMBOL(tcp_v4_md5_lookup); static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk, struct request_sock *req) { - return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr); + union tcp_md5_addr *addr; + + addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr; + return tcp_md5_do_lookup(sk, addr, AF_INET); } /* This can be called on a newly created socket, from other files */ -int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, - u8 *newkey, u8 newkeylen) +int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, + int family, const u8 *newkey, u8 newkeylen, gfp_t gfp) { /* Add Key to the list */ struct tcp_md5sig_key *key; struct tcp_sock *tp = tcp_sk(sk); - struct tcp4_md5sig_key *keys; + struct tcp_md5sig_info *md5sig; - key = tcp_v4_md5_do_lookup(sk, addr); + key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET); if (key) { /* Pre-existing entry - just update that one. 
*/ - kfree(key->key); - key->key = newkey; + memcpy(key->key, newkey, newkeylen); key->keylen = newkeylen; - } else { - struct tcp_md5sig_info *md5sig; - - if (!tp->md5sig_info) { - tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), - GFP_ATOMIC); - if (!tp->md5sig_info) { - kfree(newkey); - return -ENOMEM; - } - sk_nocaps_add(sk, NETIF_F_GSO_MASK); - } + return 0; + } - md5sig = tp->md5sig_info; - if (md5sig->entries4 == 0 && - tcp_alloc_md5sig_pool(sk) == NULL) { - kfree(newkey); + md5sig = rcu_dereference_protected(tp->md5sig_info, + sock_owned_by_user(sk)); + if (!md5sig) { + md5sig = kmalloc(sizeof(*md5sig), gfp); + if (!md5sig) return -ENOMEM; - } - - if (md5sig->alloced4 == md5sig->entries4) { - keys = kmalloc((sizeof(*keys) * - (md5sig->entries4 + 1)), GFP_ATOMIC); - if (!keys) { - kfree(newkey); - if (md5sig->entries4 == 0) - tcp_free_md5sig_pool(); - return -ENOMEM; - } - if (md5sig->entries4) - memcpy(keys, md5sig->keys4, - sizeof(*keys) * md5sig->entries4); + sk_nocaps_add(sk, NETIF_F_GSO_MASK); + INIT_HLIST_HEAD(&md5sig->head); + rcu_assign_pointer(tp->md5sig_info, md5sig); + } - /* Free old key list, and reference new one */ - kfree(md5sig->keys4); - md5sig->keys4 = keys; - md5sig->alloced4++; - } - md5sig->entries4++; - md5sig->keys4[md5sig->entries4 - 1].addr = addr; - md5sig->keys4[md5sig->entries4 - 1].base.key = newkey; - md5sig->keys4[md5sig->entries4 - 1].base.keylen = newkeylen; + key = sock_kmalloc(sk, sizeof(*key), gfp); + if (!key) + return -ENOMEM; + if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) { + sock_kfree_s(sk, key, sizeof(*key)); + return -ENOMEM; } - return 0; -} -EXPORT_SYMBOL(tcp_v4_md5_do_add); -static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk, - u8 *newkey, u8 newkeylen) -{ - return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->inet_daddr, - newkey, newkeylen); + memcpy(key->key, newkey, newkeylen); + key->keylen = newkeylen; + key->family = family; + memcpy(&key->addr, addr, + (family == AF_INET6) ? 
sizeof(struct in6_addr) : + sizeof(struct in_addr)); + hlist_add_head_rcu(&key->node, &md5sig->head); + return 0; } +EXPORT_SYMBOL(tcp_md5_do_add); -int tcp_v4_md5_do_del(struct sock *sk, __be32 addr) +int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family) { struct tcp_sock *tp = tcp_sk(sk); - int i; - - for (i = 0; i < tp->md5sig_info->entries4; i++) { - if (tp->md5sig_info->keys4[i].addr == addr) { - /* Free the key */ - kfree(tp->md5sig_info->keys4[i].base.key); - tp->md5sig_info->entries4--; - - if (tp->md5sig_info->entries4 == 0) { - kfree(tp->md5sig_info->keys4); - tp->md5sig_info->keys4 = NULL; - tp->md5sig_info->alloced4 = 0; - tcp_free_md5sig_pool(); - } else if (tp->md5sig_info->entries4 != i) { - /* Need to do some manipulation */ - memmove(&tp->md5sig_info->keys4[i], - &tp->md5sig_info->keys4[i+1], - (tp->md5sig_info->entries4 - i) * - sizeof(struct tcp4_md5sig_key)); - } - return 0; - } - } - return -ENOENT; + struct tcp_md5sig_key *key; + struct tcp_md5sig_info *md5sig; + + key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET); + if (!key) + return -ENOENT; + hlist_del_rcu(&key->node); + atomic_sub(sizeof(*key), &sk->sk_omem_alloc); + kfree_rcu(key, rcu); + md5sig = rcu_dereference_protected(tp->md5sig_info, + sock_owned_by_user(sk)); + if (hlist_empty(&md5sig->head)) + tcp_free_md5sig_pool(); + return 0; } -EXPORT_SYMBOL(tcp_v4_md5_do_del); +EXPORT_SYMBOL(tcp_md5_do_del); -static void tcp_v4_clear_md5_list(struct sock *sk) +void tcp_clear_md5_list(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); + struct tcp_md5sig_key *key; + struct hlist_node *pos, *n; + struct tcp_md5sig_info *md5sig; - /* Free each key, then the set of key keys, - * the crypto element, and then decrement our - * hold on the last resort crypto. 
- */ - if (tp->md5sig_info->entries4) { - int i; - for (i = 0; i < tp->md5sig_info->entries4; i++) - kfree(tp->md5sig_info->keys4[i].base.key); - tp->md5sig_info->entries4 = 0; + md5sig = rcu_dereference_protected(tp->md5sig_info, 1); + + if (!hlist_empty(&md5sig->head)) tcp_free_md5sig_pool(); - } - if (tp->md5sig_info->keys4) { - kfree(tp->md5sig_info->keys4); - tp->md5sig_info->keys4 = NULL; - tp->md5sig_info->alloced4 = 0; + hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) { + hlist_del_rcu(&key->node); + atomic_sub(sizeof(*key), &sk->sk_omem_alloc); + kfree_rcu(key, rcu); } } @@ -1031,7 +1055,6 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval, { struct tcp_md5sig cmd; struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr; - u8 *newkey; if (optlen < sizeof(cmd)) return -EINVAL; @@ -1042,32 +1065,16 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval, if (sin->sin_family != AF_INET) return -EINVAL; - if (!cmd.tcpm_key || !cmd.tcpm_keylen) { - if (!tcp_sk(sk)->md5sig_info) - return -ENOENT; - return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr); - } + if (!cmd.tcpm_key || !cmd.tcpm_keylen) + return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr, + AF_INET); if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN) return -EINVAL; - if (!tcp_sk(sk)->md5sig_info) { - struct tcp_sock *tp = tcp_sk(sk); - struct tcp_md5sig_info *p; - - p = kzalloc(sizeof(*p), sk->sk_allocation); - if (!p) - return -EINVAL; - - tp->md5sig_info = p; - sk_nocaps_add(sk, NETIF_F_GSO_MASK); - } - - newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation); - if (!newkey) - return -ENOMEM; - return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr, - newkey, cmd.tcpm_keylen); + return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr, + AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, + GFP_KERNEL); } static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp, @@ -1093,7 +1100,7 @@ static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp, return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp)); } -static int tcp_v4_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key, +static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, __be32 daddr, __be32 saddr, const struct tcphdr *th) { struct tcp_md5sig_pool *hp; @@ -1193,7 +1200,8 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb) int genhash; unsigned char newhash[16]; - hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr); + hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr, + AF_INET); hash_location = tcp_parse_md5sig_option(th); /* We've parsed the options - do we have a hash? */ @@ -1219,10 +1227,10 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb) if (genhash || memcmp(hash_location, newhash, 16) != 0) { if (net_ratelimit()) { - printk(KERN_INFO "MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n", - &iph->saddr, ntohs(th->source), - &iph->daddr, ntohs(th->dest), - genhash ? " tcp_v4_calc_md5_hash failed" : ""); + pr_info("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n", + &iph->saddr, ntohs(th->source), + &iph->daddr, ntohs(th->dest), + genhash ? " tcp_v4_calc_md5_hash failed" : ""); } return 1; } @@ -1391,7 +1399,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) * to destinations, already remembered * to the moment of synflood. 
*/ - LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI4/%u\n", + LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"), &saddr, ntohs(tcp_hdr(skb)->source)); goto drop_and_release; } @@ -1456,14 +1464,19 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, ireq->opt = NULL; newinet->mc_index = inet_iif(skb); newinet->mc_ttl = ip_hdr(skb)->ttl; + newinet->rcv_tos = ip_hdr(skb)->tos; inet_csk(newsk)->icsk_ext_hdr_len = 0; if (inet_opt) inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen; newinet->inet_id = newtp->write_seq ^ jiffies; - if (!dst && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL) - goto put_and_exit; - + if (!dst) { + dst = inet_csk_route_child_sock(sk, newsk, req); + if (!dst) + goto put_and_exit; + } else { + /* syncookie case : see end of cookie_v4_check() */ + } sk_setup_caps(newsk, dst); tcp_mtup_init(newsk); @@ -1481,7 +1494,8 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, #ifdef CONFIG_TCP_MD5SIG /* Copy over the MD5 key from the original socket */ - key = tcp_v4_md5_do_lookup(sk, newinet->inet_daddr); + key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr, + AF_INET); if (key != NULL) { /* * We're using one, so create a matching key @@ -1489,10 +1503,8 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, * memory, then we end up not copying the key * across. Shucks. */ - char *newkey = kmemdup(key->key, key->keylen, GFP_ATOMIC); - if (newkey != NULL) - tcp_v4_md5_do_add(newsk, newinet->inet_daddr, - newkey, key->keylen); + tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr, + AF_INET, key->key, key->keylen, GFP_ATOMIC); sk_nocaps_add(newsk, NETIF_F_GSO_MASK); } #endif @@ -1853,7 +1865,6 @@ EXPORT_SYMBOL(ipv4_specific); static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = { .md5_lookup = tcp_v4_md5_lookup, .calc_md5_hash = tcp_v4_md5_hash_skb, - .md5_add = tcp_v4_md5_add_func, .md5_parse = tcp_v4_parse_md5_keys, }; #endif @@ -1942,8 +1953,8 @@ void tcp_v4_destroy_sock(struct sock *sk) #ifdef CONFIG_TCP_MD5SIG /* Clean up the MD5 key list, if any */ if (tp->md5sig_info) { - tcp_v4_clear_md5_list(sk); - kfree(tp->md5sig_info); + tcp_clear_md5_list(sk); + kfree_rcu(tp->md5sig_info, rcu); tp->md5sig_info = NULL; } #endif diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c index 49978788a9dc..e795272fbe9e 100644 --- a/net/ipv4/tcp_memcontrol.c +++ b/net/ipv4/tcp_memcontrol.c @@ -94,7 +94,7 @@ create_files: } EXPORT_SYMBOL(tcp_init_cgroup); -void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss) +void tcp_destroy_cgroup(struct cgroup *cgrp) { struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp); struct cg_proto *cg_proto; @@ -111,7 +111,7 @@ void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss) val = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT); if (val != RESOURCE_MAX) - jump_label_dec(&memcg_socket_limit_enabled); + static_key_slow_dec(&memcg_socket_limit_enabled); } EXPORT_SYMBOL(tcp_destroy_cgroup); @@ -143,9 +143,9 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val) net->ipv4.sysctl_tcp_mem[i]); if (val == RESOURCE_MAX && old_lim != RESOURCE_MAX) - jump_label_dec(&memcg_socket_limit_enabled); + static_key_slow_dec(&memcg_socket_limit_enabled); else if (old_lim == RESOURCE_MAX && val != RESOURCE_MAX) - jump_label_inc(&memcg_socket_limit_enabled); + static_key_slow_inc(&memcg_socket_limit_enabled); return 0; } diff --git a/net/ipv4/tcp_minisocks.c 
b/net/ipv4/tcp_minisocks.c index 550e755747e0..3cabafb5cdd1 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -359,13 +359,11 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) */ do { struct tcp_md5sig_key *key; - memset(tcptw->tw_md5_key, 0, sizeof(tcptw->tw_md5_key)); - tcptw->tw_md5_keylen = 0; + tcptw->tw_md5_key = NULL; key = tp->af_specific->md5_lookup(sk, sk); if (key != NULL) { - memcpy(&tcptw->tw_md5_key, key->key, key->keylen); - tcptw->tw_md5_keylen = key->keylen; - if (tcp_alloc_md5sig_pool(sk) == NULL) + tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC); + if (tcptw->tw_md5_key && tcp_alloc_md5sig_pool(sk) == NULL) BUG(); } } while (0); @@ -405,8 +403,10 @@ void tcp_twsk_destructor(struct sock *sk) { #ifdef CONFIG_TCP_MD5SIG struct tcp_timewait_sock *twsk = tcp_twsk(sk); - if (twsk->tw_md5_keylen) + if (twsk->tw_md5_key) { tcp_free_md5sig_pool(); + kfree_rcu(twsk->tw_md5_key, rcu); + } #endif } EXPORT_SYMBOL_GPL(tcp_twsk_destructor); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 4ff3b6dc74fc..364784a91939 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2306,8 +2306,10 @@ begin_fwd: if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS)) continue; - if (tcp_retransmit_skb(sk, skb)) + if (tcp_retransmit_skb(sk, skb)) { + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL); return; + } NET_INC_STATS_BH(sock_net(sk), mib_idx); if (inet_csk(sk)->icsk_ca_state == TCP_CA_Recovery) diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index 85ee7eb7e38e..a981cdc0a6e9 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c @@ -18,6 +18,8 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/kprobes.h> #include <linux/socket.h> @@ -239,7 +241,7 @@ static __init int tcpprobe_init(void) if (ret) goto err1; - pr_info("TCP probe registered (port=%d) bufsize=%u\n", port, bufsize); + pr_info("probe registered (port=%d) bufsize=%u\n", port, bufsize); return 0; err1: proc_net_remove(&init_net, procname); diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index a516d1e399df..34d4a02c2f16 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -77,10 +77,7 @@ static int tcp_out_of_resources(struct sock *sk, int do_reset) if (sk->sk_err_soft) shift++; - if (tcp_too_many_orphans(sk, shift)) { - if (net_ratelimit()) - printk(KERN_INFO "Out of socket memory\n"); - + if (tcp_check_oom(sk, shift)) { /* Catch exceptional cases, when connection requires reset. * 1. Last segment was sent recently. 
*/ if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN || @@ -336,16 +333,18 @@ void tcp_retransmit_timer(struct sock *sk) */ struct inet_sock *inet = inet_sk(sk); if (sk->sk_family == AF_INET) { - LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n", - &inet->inet_daddr, ntohs(inet->inet_dport), - inet->inet_num, tp->snd_una, tp->snd_nxt); + LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n"), + &inet->inet_daddr, + ntohs(inet->inet_dport), inet->inet_num, + tp->snd_una, tp->snd_nxt); } #if IS_ENABLED(CONFIG_IPV6) else if (sk->sk_family == AF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); - LIMIT_NETDEBUG(KERN_DEBUG "TCP: Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n", - &np->daddr, ntohs(inet->inet_dport), - inet->inet_num, tp->snd_una, tp->snd_nxt); + LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n"), + &np->daddr, + ntohs(inet->inet_dport), inet->inet_num, + tp->snd_una, tp->snd_nxt); } #endif if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) { diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c index 01775983b997..0d0171830620 100644 --- a/net/ipv4/tunnel4.c +++ b/net/ipv4/tunnel4.c @@ -164,12 +164,12 @@ static const struct net_protocol tunnel64_protocol = { static int __init tunnel4_init(void) { if (inet_add_protocol(&tunnel4_protocol, IPPROTO_IPIP)) { - printk(KERN_ERR "tunnel4 init: can't add protocol\n"); + pr_err("%s: can't add protocol\n", __func__); return -EAGAIN; } #if IS_ENABLED(CONFIG_IPV6) if (inet_add_protocol(&tunnel64_protocol, IPPROTO_IPV6)) { - printk(KERN_ERR "tunnel64 init: can't add protocol\n"); + pr_err("tunnel64 init: can't add protocol\n"); inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP); return -EAGAIN; } @@ -181,10 +181,10 @@ static void __exit tunnel4_fini(void) { #if IS_ENABLED(CONFIG_IPV6) if (inet_del_protocol(&tunnel64_protocol, IPPROTO_IPV6)) - printk(KERN_ERR "tunnel64 close: can't remove protocol\n"); + pr_err("tunnel64 close: can't remove protocol\n"); #endif if (inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP)) - printk(KERN_ERR "tunnel4 close: can't remove protocol\n"); + pr_err("tunnel4 close: can't remove protocol\n"); } module_init(tunnel4_init); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 5d075b5f70fc..d6f5feeb3eaf 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -77,6 +77,8 @@ * 2 of the License, or (at your option) any later version. */ +#define pr_fmt(fmt) "UDP: " fmt + #include <asm/system.h> #include <asm/uaccess.h> #include <asm/ioctls.h> @@ -917,7 +919,8 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, if (!saddr) saddr = inet->mc_addr; connected = 0; - } + } else if (!ipc.oif) + ipc.oif = inet->uc_index; if (connected) rt = (struct rtable *)sk_dst_check(sk, 0); @@ -974,7 +977,7 @@ back_from_confirm: /* ... which is an evident application bug. 
--ANK */ release_sock(sk); - LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n"); + LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("cork app bug 2\n")); err = -EINVAL; goto out; } @@ -1053,7 +1056,7 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset, if (unlikely(!up->pending)) { release_sock(sk); - LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 3\n"); + LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("udp cork app bug 3\n")); return -EINVAL; } @@ -1166,7 +1169,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; struct sk_buff *skb; unsigned int ulen, copied; - int peeked; + int peeked, off = 0; int err; int is_udplite = IS_UDPLITE(sk); bool slow; @@ -1182,7 +1185,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, try_again: skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), - &peeked, &err); + &peeked, &off, &err); if (!skb) goto out; @@ -1446,9 +1449,8 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) * provided by the application." */ if (up->pcrlen == 0) { /* full coverage was set */ - LIMIT_NETDEBUG(KERN_WARNING "UDPLITE: partial coverage " - "%d while full coverage %d requested\n", - UDP_SKB_CB(skb)->cscov, skb->len); + LIMIT_NETDEBUG(KERN_WARNING "UDPLite: partial coverage %d while full coverage %d requested\n", + UDP_SKB_CB(skb)->cscov, skb->len); goto drop; } /* The next case involves violating the min. coverage requested @@ -1458,9 +1460,8 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) * Therefore the above ...()->partial_cov statement is essential. */ if (UDP_SKB_CB(skb)->cscov < up->pcrlen) { - LIMIT_NETDEBUG(KERN_WARNING - "UDPLITE: coverage %d too small, need min %d\n", - UDP_SKB_CB(skb)->cscov, up->pcrlen); + LIMIT_NETDEBUG(KERN_WARNING "UDPLite: coverage %d too small, need min %d\n", + UDP_SKB_CB(skb)->cscov, up->pcrlen); goto drop; } } @@ -1688,13 +1689,10 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, short_packet: LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n", - proto == IPPROTO_UDPLITE ? "-Lite" : "", - &saddr, - ntohs(uh->source), - ulen, - skb->len, - &daddr, - ntohs(uh->dest)); + proto == IPPROTO_UDPLITE ? "Lite" : "", + &saddr, ntohs(uh->source), + ulen, skb->len, + &daddr, ntohs(uh->dest)); goto drop; csum_error: @@ -1703,11 +1701,8 @@ csum_error: * the network is concerned, anyway) as per 4.1.3.4 (MUST). */ LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n", - proto == IPPROTO_UDPLITE ? "-Lite" : "", - &saddr, - ntohs(uh->source), - &daddr, - ntohs(uh->dest), + proto == IPPROTO_UDPLITE ? "Lite" : "", + &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest), ulen); drop: UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c index 12e9499a1a6c..2c46acd4cc36 100644 --- a/net/ipv4/udplite.c +++ b/net/ipv4/udplite.c @@ -10,6 +10,9 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
*/ + +#define pr_fmt(fmt) "UDPLite: " fmt + #include <linux/export.h> #include "udp_impl.h" @@ -129,11 +132,11 @@ void __init udplite4_register(void) inet_register_protosw(&udplite4_protosw); if (udplite4_proc_init()) - printk(KERN_ERR "%s: Cannot register /proc!\n", __func__); + pr_err("%s: Cannot register /proc!\n", __func__); return; out_unregister_proto: proto_unregister(&udplite_prot); out_register_err: - printk(KERN_CRIT "%s: Cannot add UDP-Lite protocol.\n", __func__); + pr_crit("%s: Cannot add UDP-Lite protocol\n", __func__); } diff --git a/net/ipv4/xfrm4_mode_beet.c b/net/ipv4/xfrm4_mode_beet.c index 63418185f524..e3db3f915114 100644 --- a/net/ipv4/xfrm4_mode_beet.c +++ b/net/ipv4/xfrm4_mode_beet.c @@ -110,10 +110,7 @@ static int xfrm4_beet_input(struct xfrm_state *x, struct sk_buff *skb) skb_push(skb, sizeof(*iph)); skb_reset_network_header(skb); - - memmove(skb->data - skb->mac_len, skb_mac_header(skb), - skb->mac_len); - skb_set_mac_header(skb, -skb->mac_len); + skb_mac_header_rebuild(skb); xfrm4_beet_make_header(skb); diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c index 534972e114ac..ed4bf11ef9f4 100644 --- a/net/ipv4/xfrm4_mode_tunnel.c +++ b/net/ipv4/xfrm4_mode_tunnel.c @@ -66,7 +66,6 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) { - const unsigned char *old_mac; int err = -EINVAL; if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP) @@ -84,10 +83,9 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb) if (!(x->props.flags & XFRM_STATE_NOECN)) ipip_ecn_decapsulate(skb); - old_mac = skb_mac_header(skb); - skb_set_mac_header(skb, -skb->mac_len); - memmove(skb_mac_header(skb), old_mac, skb->mac_len); skb_reset_network_header(skb); + skb_mac_header_rebuild(skb); + err = 0; out: diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c index 9247d9d70e9d..05a5df2febc9 100644 --- a/net/ipv4/xfrm4_tunnel.c +++ b/net/ipv4/xfrm4_tunnel.c @@ -3,6 +3,8 @@ * Copyright (C) 2003 David S. 
Miller (davem@redhat.com) */ +#define pr_fmt(fmt) "IPsec: " fmt + #include <linux/skbuff.h> #include <linux/module.h> #include <linux/mutex.h> @@ -75,18 +77,18 @@ static struct xfrm_tunnel xfrm64_tunnel_handler __read_mostly = { static int __init ipip_init(void) { if (xfrm_register_type(&ipip_type, AF_INET) < 0) { - printk(KERN_INFO "ipip init: can't add xfrm type\n"); + pr_info("%s: can't add xfrm type\n", __func__); return -EAGAIN; } if (xfrm4_tunnel_register(&xfrm_tunnel_handler, AF_INET)) { - printk(KERN_INFO "ipip init: can't add xfrm handler for AF_INET\n"); + pr_info("%s: can't add xfrm handler for AF_INET\n", __func__); xfrm_unregister_type(&ipip_type, AF_INET); return -EAGAIN; } #if IS_ENABLED(CONFIG_IPV6) if (xfrm4_tunnel_register(&xfrm64_tunnel_handler, AF_INET6)) { - printk(KERN_INFO "ipip init: can't add xfrm handler for AF_INET6\n"); + pr_info("%s: can't add xfrm handler for AF_INET6\n", __func__); xfrm4_tunnel_deregister(&xfrm_tunnel_handler, AF_INET); xfrm_unregister_type(&ipip_type, AF_INET); return -EAGAIN; @@ -99,12 +101,14 @@ static void __exit ipip_fini(void) { #if IS_ENABLED(CONFIG_IPV6) if (xfrm4_tunnel_deregister(&xfrm64_tunnel_handler, AF_INET6)) - printk(KERN_INFO "ipip close: can't remove xfrm handler for AF_INET6\n"); + pr_info("%s: can't remove xfrm handler for AF_INET6\n", + __func__); #endif if (xfrm4_tunnel_deregister(&xfrm_tunnel_handler, AF_INET)) - printk(KERN_INFO "ipip close: can't remove xfrm handler for AF_INET\n"); + pr_info("%s: can't remove xfrm handler for AF_INET\n", + __func__); if (xfrm_unregister_type(&ipip_type, AF_INET) < 0) - printk(KERN_INFO "ipip close: can't remove xfrm type\n"); + pr_info("%s: can't remove xfrm type\n", __func__); } module_init(ipip_init);
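
The common thread in the hunks above is that each file now defines pr_fmt() once ("UDP: ", "UDPLite: ", "IPsec: ") and the pr_<level>() calls pick the prefix up automatically, which is why literal "TCP:"/"UDP:" prefixes disappear from the individual format strings. The snippet below is a minimal userspace sketch of that mechanism, not the kernel's real macro definitions: pr_fmt() is expanded at each call site, so the prefix is pasted onto the format string at compile time by string-literal concatenation. LIMIT_NETDEBUG() takes a fully formed printk string, which is why the tcp_retransmit_timer() and udp.c call sites wrap their formats in pr_fmt() explicitly.

#include <stdio.h>

/* Userspace stand-ins for the kernel macros; illustrative only. */
#define pr_fmt(fmt) "UDP: " fmt	/* per-file prefix, as in udp.c */
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* Prints "UDP: cork app bug 2" - the call site no longer spells the prefix. */
	pr_info("cork app bug %d\n", 2);
	return 0;
}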
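
One functional change is buried among the logging conversions: in udp_sendmsg(), an unconnected, non-multicast send with no interface chosen via control messages now falls back to inet->uc_index ("else if (!ipc.oif) ipc.oif = inet->uc_index;"), i.e. the device selected with the IP_UNICAST_IF socket option. Below is a hedged userspace sketch of how an application would use that option to pin unconnected UDP sends to one interface; "eth0" is a placeholder, the fallback define of IP_UNICAST_IF is only for older headers, and passing the ifindex in network byte order is an assumption to verify against ip(7) on the target kernel.

#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef IP_UNICAST_IF
#define IP_UNICAST_IF 50	/* fallback for older userspace headers */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	unsigned int idx = if_nametoindex("eth0");	/* placeholder interface name */
	if (!idx) {
		fprintf(stderr, "if_nametoindex: no such interface\n");
		close(fd);
		return 1;
	}

	int val = (int)htonl(idx);	/* assumed network-byte-order encoding */
	if (setsockopt(fd, IPPROTO_IP, IP_UNICAST_IF, &val, sizeof(val)) < 0)
		perror("setsockopt(IP_UNICAST_IF)");

	/* Later sendto() calls with no connected peer and no per-packet
	 * interface now route via the chosen device - the kernel-side
	 * "ipc.oif = inet->uc_index" fallback added above. */
	close(fd);
	return 0;
}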
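
The xfrm4_mode_beet.c and xfrm4_mode_tunnel.c hunks drop the open-coded "save MAC header, move it to just before the network header" sequence in favour of a shared skb_mac_header_rebuild() helper. The sketch below restates the pattern the hunks delete as a rough picture of what that helper centralizes; the skb_mac_header_was_set() guard is an assumption about the helper's body, not a quote of it.

#include <linux/skbuff.h>
#include <linux/string.h>

/* Rough sketch of the rebuild previously open-coded in the two xfrm4
 * input paths; the real helper lives in the skbuff headers. */
static inline void sketch_mac_header_rebuild(struct sk_buff *skb)
{
	if (skb_mac_header_was_set(skb)) {
		const unsigned char *old_mac = skb_mac_header(skb);

		/* Re-point the MAC header mac_len bytes before the freshly
		 * reset network header, then copy the saved bytes there. */
		skb_set_mac_header(skb, -skb->mac_len);
		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	}
}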
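
ipip_init() and tunnel4_init() both follow the staged-registration idiom: when a later step fails, every earlier registration is undone in reverse order before returning -EAGAIN. A small standalone sketch of the same structure is below; register_a()/register_b() and their counterparts are hypothetical stand-ins, not kernel APIs.

#include <errno.h>
#include <stdio.h>

/* Hypothetical registration steps used only to show the unwind order. */
static int register_a(void)    { return 0; }
static int register_b(void)    { return -1; }	/* pretend this step fails */
static void unregister_a(void) { fprintf(stderr, "rolled back a\n"); }

static int demo_init(void)
{
	if (register_a() < 0) {
		fprintf(stderr, "%s: can't register a\n", __func__);
		return -EAGAIN;
	}
	if (register_b() < 0) {
		/* Undo the earlier step before bailing out, as ipip_init()
		 * unregisters the xfrm type when adding a handler fails. */
		fprintf(stderr, "%s: can't register b\n", __func__);
		unregister_a();
		return -EAGAIN;
	}
	return 0;
}

int main(void)
{
	return demo_init() < 0 ? 1 : 0;
}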