Diffstat (limited to 'net')
249 files changed, 7647 insertions, 3769 deletions
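The bulk of this batch is a mechanical conversion: protocol recvmsg() handlers stop reaching into msg->msg_iov and instead hand the whole msghdr to the new skb_copy_datagram_msg() helper, which pairs with the iov_iter work in net/core/datagram.c further down. A minimal sketch of the before/after pattern, using a hypothetical my_proto_recvmsg() in place of the per-protocol handlers converted below:

static int my_proto_recvmsg(struct kiocb *iocb, struct socket *sock,
                            struct msghdr *msg, size_t size, int flags)
{
        struct sock *sk = sock->sk;
        struct sk_buff *skb;
        int err, copied;

        skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
        if (!skb)
                return err;

        copied = skb->len;
        if (copied > size) {
                copied = size;
                msg->msg_flags |= MSG_TRUNC;
        }

        /* old: err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); */
        err = skb_copy_datagram_msg(skb, 0, msg, copied);

        skb_free_datagram(sk, skb);
        return err ? err : copied;
}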
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 0d441ec8763e..376805005cc7 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -26,6 +26,7 @@ #include <linux/slab.h> #include <linux/skbuff.h> #include <linux/netdevice.h> +#include <linux/net_tstamp.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <net/arp.h> @@ -150,7 +151,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, u16 vlan_tci; vlan_tci = vlan->vlan_id; vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority); - skb = __vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci); + __vlan_hwaccel_put_tag(skb, vlan->vlan_proto, vlan_tci); } skb->dev = vlan->real_dev; @@ -669,6 +670,23 @@ static void vlan_ethtool_get_drvinfo(struct net_device *dev, strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); } +static int vlan_ethtool_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + const struct ethtool_ops *ops = vlan->real_dev->ethtool_ops; + + if (ops->get_ts_info) { + return ops->get_ts_info(vlan->real_dev, info); + } else { + info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + info->phc_index = -1; + } + + return 0; +} + static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct vlan_pcpu_stats *p; @@ -752,6 +770,7 @@ static const struct ethtool_ops vlan_ethtool_ops = { .get_settings = vlan_ethtool_get_settings, .get_drvinfo = vlan_ethtool_get_drvinfo, .get_link = ethtool_op_get_link, + .get_ts_info = vlan_ethtool_get_ts_info, }; static const struct net_device_ops vlan_netdev_ops = { diff --git a/net/Kconfig b/net/Kconfig index 6272420a721b..99815b5454bf 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -6,7 +6,7 @@ menuconfig NET bool "Networking support" select NLATTR select GENERIC_NET_UTILS - select ANON_INODES + select BPF ---help--- Unless you really know what you are doing, you should say Y here. 
The reason is that some programs need kernel networking support even diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c index c00897f65a31..425942db17f6 100644 --- a/net/appletalk/ddp.c +++ b/net/appletalk/ddp.c @@ -1758,7 +1758,7 @@ static int atalk_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr copied = size; msg->msg_flags |= MSG_TRUNC; } - err = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, offset, msg, copied); if (!err && msg->msg_name) { DECLARE_SOCKADDR(struct sockaddr_at *, sat, msg->msg_name); diff --git a/net/atm/common.c b/net/atm/common.c index 6a765156a3f6..9cd1ccae9a11 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -554,7 +554,7 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, msg->msg_flags |= MSG_TRUNC; } - error = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + error = skb_copy_datagram_msg(skb, 0, msg, copied); if (error) return error; sock_recv_ts_and_drops(msg, sk, skb); diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index c35c3f48fc0f..f4f835e19378 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1634,7 +1634,7 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock, msg->msg_flags |= MSG_TRUNC; } - skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + skb_copy_datagram_msg(skb, 0, msg, copied); if (msg->msg_name) { ax25_digi digi; diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 339c74ad4553..0a7cc565f93e 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -237,7 +237,7 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, } skb_reset_transport_header(skb); - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err == 0) { sock_recv_ts_and_drops(msg, sk, skb); @@ -328,7 +328,7 @@ int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock, } chunk = min_t(unsigned int, skb->len, size); - if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, chunk)) { + if (skb_copy_datagram_msg(skb, 0, msg, chunk)) { skb_queue_head(&sk->sk_receive_queue, skb); if (!copied) copied = -EFAULT; diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index bbc4ac748263..5e2cd2535978 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -878,7 +878,7 @@ static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock, } skb_reset_transport_header(skb); - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); switch (hci_pi(sk)->channel) { case HCI_CHANNEL_RAW: diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 992ec49a96aa..f96933a823e3 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -112,6 +112,7 @@ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) kfree_skb(skb); } +EXPORT_SYMBOL_GPL(br_deliver); /* called with rcu_read_lock */ void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0) @@ -183,6 +184,11 @@ static void br_flood(struct net_bridge *br, struct sk_buff *skb, /* Do not flood unicast traffic to ports that turn it off */ if (unicast && !(p->flags & BR_FLOOD)) continue; + + /* Do not flood to ports that enable proxy ARP */ + if (p->flags & BR_PROXYARP) + continue; + prev = maybe_deliver(prev, p, skb, __packet_hook); if (IS_ERR(prev)) goto out; diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 
6fd5522df696..1f1de715197c 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -16,6 +16,8 @@ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/netfilter_bridge.h> +#include <linux/neighbour.h> +#include <net/arp.h> #include <linux/export.h> #include <linux/rculist.h> #include "br_private.h" @@ -57,6 +59,60 @@ static int br_pass_frame_up(struct sk_buff *skb) netif_receive_skb); } +static void br_do_proxy_arp(struct sk_buff *skb, struct net_bridge *br, + u16 vid) +{ + struct net_device *dev = br->dev; + struct neighbour *n; + struct arphdr *parp; + u8 *arpptr, *sha; + __be32 sip, tip; + + if (dev->flags & IFF_NOARP) + return; + + if (!pskb_may_pull(skb, arp_hdr_len(dev))) { + dev->stats.tx_dropped++; + return; + } + parp = arp_hdr(skb); + + if (parp->ar_pro != htons(ETH_P_IP) || + parp->ar_op != htons(ARPOP_REQUEST) || + parp->ar_hln != dev->addr_len || + parp->ar_pln != 4) + return; + + arpptr = (u8 *)parp + sizeof(struct arphdr); + sha = arpptr; + arpptr += dev->addr_len; /* sha */ + memcpy(&sip, arpptr, sizeof(sip)); + arpptr += sizeof(sip); + arpptr += dev->addr_len; /* tha */ + memcpy(&tip, arpptr, sizeof(tip)); + + if (ipv4_is_loopback(tip) || + ipv4_is_multicast(tip)) + return; + + n = neigh_lookup(&arp_tbl, &tip, dev); + if (n) { + struct net_bridge_fdb_entry *f; + + if (!(n->nud_state & NUD_VALID)) { + neigh_release(n); + return; + } + + f = __br_fdb_get(br, n->ha, vid); + if (f) + arp_send(ARPOP_REPLY, ETH_P_ARP, sip, skb->dev, tip, + sha, n->ha, sha); + + neigh_release(n); + } +} + /* note: already called with rcu_read_lock */ int br_handle_frame_finish(struct sk_buff *skb) { @@ -98,6 +154,10 @@ int br_handle_frame_finish(struct sk_buff *skb) dst = NULL; if (is_broadcast_ether_addr(dest)) { + if (p->flags & BR_PROXYARP && + skb->protocol == htons(ETH_P_ARP)) + br_do_proxy_arp(skb, br, vid); + skb2 = skb; unicast = false; } else if (is_multicast_ether_addr(dest)) { diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index 1bada53bb195..1a4f32c09ad5 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -192,7 +192,6 @@ static inline void nf_bridge_save_header(struct sk_buff *skb) static int br_parse_ip_options(struct sk_buff *skb) { - struct ip_options *opt; const struct iphdr *iph; struct net_device *dev = skb->dev; u32 len; @@ -201,7 +200,6 @@ static int br_parse_ip_options(struct sk_buff *skb) goto inhdr_error; iph = ip_hdr(skb); - opt = &(IPCB(skb)->opt); /* Basic sanity checks */ if (iph->ihl < 5 || iph->version != 4) @@ -227,23 +225,11 @@ static int br_parse_ip_options(struct sk_buff *skb) } memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); - if (iph->ihl == 5) - return 0; - - opt->optlen = iph->ihl*4 - sizeof(struct iphdr); - if (ip_options_compile(dev_net(dev), opt, skb)) - goto inhdr_error; - - /* Check correct handling of SRR option */ - if (unlikely(opt->srr)) { - struct in_device *in_dev = __in_dev_get_rcu(dev); - if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev)) - goto drop; - - if (ip_options_rcv_srr(skb)) - goto drop; - } - + /* We should really parse IP options here but until + * somebody who actually uses IP options complains to + * us we'll just silently ignore the options because + * we're lazy! 
+ */ return 0; inhdr_error: diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 2ff9706647f2..86c239b06f6e 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -60,7 +60,8 @@ static int br_port_fill_attrs(struct sk_buff *skb, nla_put_u8(skb, IFLA_BRPORT_PROTECT, !!(p->flags & BR_ROOT_BLOCK)) || nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, !!(p->flags & BR_MULTICAST_FAST_LEAVE)) || nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) || - nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & BR_FLOOD))) + nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, !!(p->flags & BR_FLOOD)) || + nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP))) return -EMSGSIZE; return 0; @@ -332,6 +333,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[]) br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK); br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING); br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD); + br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP); if (tb[IFLA_BRPORT_COST]) { err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST])); diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 4d783d071305..8f3f08140258 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -172,6 +172,7 @@ struct net_bridge_port #define BR_FLOOD 0x00000040 #define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING) #define BR_PROMISC 0x00000080 +#define BR_PROXYARP 0x00000100 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING struct bridge_mcast_own_query ip4_own_query; diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c index e561cd59b8a6..2de5d91199e8 100644 --- a/net/bridge/br_sysfs_if.c +++ b/net/bridge/br_sysfs_if.c @@ -170,6 +170,7 @@ BRPORT_ATTR_FLAG(bpdu_guard, BR_BPDU_GUARD); BRPORT_ATTR_FLAG(root_block, BR_ROOT_BLOCK); BRPORT_ATTR_FLAG(learning, BR_LEARNING); BRPORT_ATTR_FLAG(unicast_flood, BR_FLOOD); +BRPORT_ATTR_FLAG(proxyarp, BR_PROXYARP); #ifdef CONFIG_BRIDGE_IGMP_SNOOPING static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf) @@ -213,6 +214,7 @@ static const struct brport_attribute *brport_attrs[] = { &brport_attr_multicast_router, &brport_attr_multicast_fast_leave, #endif + &brport_attr_proxyarp, NULL }; diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index 150048fb99b0..97b8ddf57363 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -199,8 +199,8 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v, if (skb->vlan_proto != proto) { /* Protocol-mismatch, empty out vlan_tci for new tag */ skb_push(skb, ETH_HLEN); - skb = __vlan_put_tag(skb, skb->vlan_proto, - vlan_tx_tag_get(skb)); + skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto, + vlan_tx_tag_get(skb)); if (unlikely(!skb)) return false; diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c index da17a5eab8b4..074c557ab505 100644 --- a/net/bridge/netfilter/nf_tables_bridge.c +++ b/net/bridge/netfilter/nf_tables_bridge.c @@ -75,9 +75,11 @@ static const struct nf_chain_type filter_bridge = { .type = NFT_CHAIN_T_DEFAULT, .family = NFPROTO_BRIDGE, .owner = THIS_MODULE, - .hook_mask = (1 << NF_BR_LOCAL_IN) | + .hook_mask = (1 << NF_BR_PRE_ROUTING) | + (1 << NF_BR_LOCAL_IN) | (1 << NF_BR_FORWARD) | - (1 << NF_BR_LOCAL_OUT), + (1 << NF_BR_LOCAL_OUT) | + (1 << NF_BR_POST_ROUTING), }; static int __init nf_tables_bridge_init(void) diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c index 
a76479535df2..48da2c54a69e 100644 --- a/net/bridge/netfilter/nft_reject_bridge.c +++ b/net/bridge/netfilter/nft_reject_bridge.c @@ -16,6 +16,239 @@ #include <net/netfilter/nft_reject.h> #include <net/netfilter/ipv4/nf_reject.h> #include <net/netfilter/ipv6/nf_reject.h> +#include <linux/ip.h> +#include <net/ip.h> +#include <net/ip6_checksum.h> +#include <linux/netfilter_bridge.h> +#include "../br_private.h" + +static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb, + struct sk_buff *nskb) +{ + struct ethhdr *eth; + + eth = (struct ethhdr *)skb_push(nskb, ETH_HLEN); + skb_reset_mac_header(nskb); + ether_addr_copy(eth->h_source, eth_hdr(oldskb)->h_dest); + ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source); + eth->h_proto = eth_hdr(oldskb)->h_proto; + skb_pull(nskb, ETH_HLEN); +} + +static int nft_reject_iphdr_validate(struct sk_buff *oldskb) +{ + struct iphdr *iph; + u32 len; + + if (!pskb_may_pull(oldskb, sizeof(struct iphdr))) + return 0; + + iph = ip_hdr(oldskb); + if (iph->ihl < 5 || iph->version != 4) + return 0; + + len = ntohs(iph->tot_len); + if (oldskb->len < len) + return 0; + else if (len < (iph->ihl*4)) + return 0; + + if (!pskb_may_pull(oldskb, iph->ihl*4)) + return 0; + + return 1; +} + +static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook) +{ + struct sk_buff *nskb; + struct iphdr *niph; + const struct tcphdr *oth; + struct tcphdr _oth; + + if (!nft_reject_iphdr_validate(oldskb)) + return; + + oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook); + if (!oth) + return; + + nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) + + LL_MAX_HEADER, GFP_ATOMIC); + if (!nskb) + return; + + skb_reserve(nskb, LL_MAX_HEADER); + niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP, + sysctl_ip_default_ttl); + nf_reject_ip_tcphdr_put(nskb, oldskb, oth); + niph->ttl = sysctl_ip_default_ttl; + niph->tot_len = htons(nskb->len); + ip_send_check(niph); + + nft_reject_br_push_etherhdr(oldskb, nskb); + + br_deliver(br_port_get_rcu(oldskb->dev), nskb); +} + +static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook, + u8 code) +{ + struct sk_buff *nskb; + struct iphdr *niph; + struct icmphdr *icmph; + unsigned int len; + void *payload; + __wsum csum; + + if (!nft_reject_iphdr_validate(oldskb)) + return; + + /* IP header checks: fragment. */ + if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET)) + return; + + /* RFC says return as much as we can without exceeding 576 bytes. 
*/ + len = min_t(unsigned int, 536, oldskb->len); + + if (!pskb_may_pull(oldskb, len)) + return; + + if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), 0)) + return; + + nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) + + LL_MAX_HEADER + len, GFP_ATOMIC); + if (!nskb) + return; + + skb_reserve(nskb, LL_MAX_HEADER); + niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP, + sysctl_ip_default_ttl); + + skb_reset_transport_header(nskb); + icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr)); + memset(icmph, 0, sizeof(*icmph)); + icmph->type = ICMP_DEST_UNREACH; + icmph->code = code; + + payload = skb_put(nskb, len); + memcpy(payload, skb_network_header(oldskb), len); + + csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0); + icmph->checksum = csum_fold(csum); + + niph->tot_len = htons(nskb->len); + ip_send_check(niph); + + nft_reject_br_push_etherhdr(oldskb, nskb); + + br_deliver(br_port_get_rcu(oldskb->dev), nskb); +} + +static int nft_reject_ip6hdr_validate(struct sk_buff *oldskb) +{ + struct ipv6hdr *hdr; + u32 pkt_len; + + if (!pskb_may_pull(oldskb, sizeof(struct ipv6hdr))) + return 0; + + hdr = ipv6_hdr(oldskb); + if (hdr->version != 6) + return 0; + + pkt_len = ntohs(hdr->payload_len); + if (pkt_len + sizeof(struct ipv6hdr) > oldskb->len) + return 0; + + return 1; +} + +static void nft_reject_br_send_v6_tcp_reset(struct net *net, + struct sk_buff *oldskb, int hook) +{ + struct sk_buff *nskb; + const struct tcphdr *oth; + struct tcphdr _oth; + unsigned int otcplen; + struct ipv6hdr *nip6h; + + if (!nft_reject_ip6hdr_validate(oldskb)) + return; + + oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook); + if (!oth) + return; + + nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) + + LL_MAX_HEADER, GFP_ATOMIC); + if (!nskb) + return; + + skb_reserve(nskb, LL_MAX_HEADER); + nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP, + net->ipv6.devconf_all->hop_limit); + nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen); + nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr)); + + nft_reject_br_push_etherhdr(oldskb, nskb); + + br_deliver(br_port_get_rcu(oldskb->dev), nskb); +} + +static void nft_reject_br_send_v6_unreach(struct net *net, + struct sk_buff *oldskb, int hook, + u8 code) +{ + struct sk_buff *nskb; + struct ipv6hdr *nip6h; + struct icmp6hdr *icmp6h; + unsigned int len; + void *payload; + + if (!nft_reject_ip6hdr_validate(oldskb)) + return; + + /* Include "As much of invoking packet as possible without the ICMPv6 + * packet exceeding the minimum IPv6 MTU" in the ICMP payload. 
+ */ + len = min_t(unsigned int, 1220, oldskb->len); + + if (!pskb_may_pull(oldskb, len)) + return; + + nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmp6hdr) + + LL_MAX_HEADER + len, GFP_ATOMIC); + if (!nskb) + return; + + skb_reserve(nskb, LL_MAX_HEADER); + nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6, + net->ipv6.devconf_all->hop_limit); + + skb_reset_transport_header(nskb); + icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr)); + memset(icmp6h, 0, sizeof(*icmp6h)); + icmp6h->icmp6_type = ICMPV6_DEST_UNREACH; + icmp6h->icmp6_code = code; + + payload = skb_put(nskb, len); + memcpy(payload, skb_network_header(oldskb), len); + nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr)); + + icmp6h->icmp6_cksum = + csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr, + nskb->len - sizeof(struct ipv6hdr), + IPPROTO_ICMPV6, + csum_partial(icmp6h, + nskb->len - sizeof(struct ipv6hdr), + 0)); + + nft_reject_br_push_etherhdr(oldskb, nskb); + + br_deliver(br_port_get_rcu(oldskb->dev), nskb); +} static void nft_reject_bridge_eval(const struct nft_expr *expr, struct nft_data data[NFT_REG_MAX + 1], @@ -23,35 +256,46 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr, { struct nft_reject *priv = nft_expr_priv(expr); struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out); + const unsigned char *dest = eth_hdr(pkt->skb)->h_dest; + + if (is_broadcast_ether_addr(dest) || + is_multicast_ether_addr(dest)) + goto out; switch (eth_hdr(pkt->skb)->h_proto) { case htons(ETH_P_IP): switch (priv->type) { case NFT_REJECT_ICMP_UNREACH: - nf_send_unreach(pkt->skb, priv->icmp_code); + nft_reject_br_send_v4_unreach(pkt->skb, + pkt->ops->hooknum, + priv->icmp_code); break; case NFT_REJECT_TCP_RST: - nf_send_reset(pkt->skb, pkt->ops->hooknum); + nft_reject_br_send_v4_tcp_reset(pkt->skb, + pkt->ops->hooknum); break; case NFT_REJECT_ICMPX_UNREACH: - nf_send_unreach(pkt->skb, - nft_reject_icmp_code(priv->icmp_code)); + nft_reject_br_send_v4_unreach(pkt->skb, + pkt->ops->hooknum, + nft_reject_icmp_code(priv->icmp_code)); break; } break; case htons(ETH_P_IPV6): switch (priv->type) { case NFT_REJECT_ICMP_UNREACH: - nf_send_unreach6(net, pkt->skb, priv->icmp_code, - pkt->ops->hooknum); + nft_reject_br_send_v6_unreach(net, pkt->skb, + pkt->ops->hooknum, + priv->icmp_code); break; case NFT_REJECT_TCP_RST: - nf_send_reset6(net, pkt->skb, pkt->ops->hooknum); + nft_reject_br_send_v6_tcp_reset(net, pkt->skb, + pkt->ops->hooknum); break; case NFT_REJECT_ICMPX_UNREACH: - nf_send_unreach6(net, pkt->skb, - nft_reject_icmpv6_code(priv->icmp_code), - pkt->ops->hooknum); + nft_reject_br_send_v6_unreach(net, pkt->skb, + pkt->ops->hooknum, + nft_reject_icmpv6_code(priv->icmp_code)); break; } break; @@ -59,15 +303,38 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr, /* No explicit way to reject this protocol, drop it. 
*/ break; } +out: data[NFT_REG_VERDICT].verdict = NF_DROP; } +static int nft_reject_bridge_validate_hooks(const struct nft_chain *chain) +{ + struct nft_base_chain *basechain; + + if (chain->flags & NFT_BASE_CHAIN) { + basechain = nft_base_chain(chain); + + switch (basechain->ops[0].hooknum) { + case NF_BR_PRE_ROUTING: + case NF_BR_LOCAL_IN: + break; + default: + return -EOPNOTSUPP; + } + } + return 0; +} + static int nft_reject_bridge_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_reject *priv = nft_expr_priv(expr); - int icmp_code; + int icmp_code, err; + + err = nft_reject_bridge_validate_hooks(ctx->chain); + if (err < 0) + return err; if (tb[NFTA_REJECT_TYPE] == NULL) return -EINVAL; @@ -116,6 +383,13 @@ nla_put_failure: return -1; } +static int nft_reject_bridge_validate(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nft_data **data) +{ + return nft_reject_bridge_validate_hooks(ctx->chain); +} + static struct nft_expr_type nft_reject_bridge_type; static const struct nft_expr_ops nft_reject_bridge_ops = { .type = &nft_reject_bridge_type, @@ -123,6 +397,7 @@ static const struct nft_expr_ops nft_reject_bridge_ops = { .eval = nft_reject_bridge_eval, .init = nft_reject_bridge_init, .dump = nft_reject_bridge_dump, + .validate = nft_reject_bridge_validate, }; static struct nft_expr_type nft_reject_bridge_type __read_mostly = { diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 43f750e88e19..fbcd156099fb 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -293,7 +293,7 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, copylen = len; } - ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, copylen); + ret = skb_copy_datagram_msg(skb, 0, m, copylen); if (ret) goto out_free; diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c index de6662b14e1f..7e38b729696a 100644 --- a/net/ceph/auth_x.c +++ b/net/ceph/auth_x.c @@ -149,6 +149,7 @@ static int process_one_ticket(struct ceph_auth_client *ac, struct ceph_crypto_key old_key; void *ticket_buf = NULL; void *tp, *tpend; + void **ptp; struct ceph_timespec new_validity; struct ceph_crypto_key new_session_key; struct ceph_buffer *new_ticket_blob; @@ -208,25 +209,19 @@ static int process_one_ticket(struct ceph_auth_client *ac, goto out; } tp = ticket_buf; - dlen = ceph_decode_32(&tp); + ptp = &tp; + tpend = *ptp + dlen; } else { /* unencrypted */ - ceph_decode_32_safe(p, end, dlen, bad); - ticket_buf = kmalloc(dlen, GFP_NOFS); - if (!ticket_buf) { - ret = -ENOMEM; - goto out; - } - tp = ticket_buf; - ceph_decode_need(p, end, dlen, bad); - ceph_decode_copy(p, ticket_buf, dlen); + ptp = p; + tpend = end; } - tpend = tp + dlen; + ceph_decode_32_safe(ptp, tpend, dlen, bad); dout(" ticket blob is %d bytes\n", dlen); - ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad); - blob_struct_v = ceph_decode_8(&tp); - new_secret_id = ceph_decode_64(&tp); - ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend); + ceph_decode_need(ptp, tpend, 1 + sizeof(u64), bad); + blob_struct_v = ceph_decode_8(ptp); + new_secret_id = ceph_decode_64(ptp); + ret = ceph_decode_buffer(&new_ticket_blob, ptp, tpend); if (ret) goto out; diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c index 62fc5e7a9acf..790fe89d90c0 100644 --- a/net/ceph/crypto.c +++ b/net/ceph/crypto.c @@ -90,11 +90,82 @@ static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void) static const u8 *aes_iv = (u8 *)CEPH_AES_IV; +/* + * Should be used for buffers allocated with 
ceph_kvmalloc(). + * Currently these are encrypt out-buffer (ceph_buffer) and decrypt + * in-buffer (msg front). + * + * Dispose of @sgt with teardown_sgtable(). + * + * @prealloc_sg is to avoid memory allocation inside sg_alloc_table() + * in cases where a single sg is sufficient. No attempt to reduce the + * number of sgs by squeezing physically contiguous pages together is + * made though, for simplicity. + */ +static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg, + const void *buf, unsigned int buf_len) +{ + struct scatterlist *sg; + const bool is_vmalloc = is_vmalloc_addr(buf); + unsigned int off = offset_in_page(buf); + unsigned int chunk_cnt = 1; + unsigned int chunk_len = PAGE_ALIGN(off + buf_len); + int i; + int ret; + + if (buf_len == 0) { + memset(sgt, 0, sizeof(*sgt)); + return -EINVAL; + } + + if (is_vmalloc) { + chunk_cnt = chunk_len >> PAGE_SHIFT; + chunk_len = PAGE_SIZE; + } + + if (chunk_cnt > 1) { + ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS); + if (ret) + return ret; + } else { + WARN_ON(chunk_cnt != 1); + sg_init_table(prealloc_sg, 1); + sgt->sgl = prealloc_sg; + sgt->nents = sgt->orig_nents = 1; + } + + for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) { + struct page *page; + unsigned int len = min(chunk_len - off, buf_len); + + if (is_vmalloc) + page = vmalloc_to_page(buf); + else + page = virt_to_page(buf); + + sg_set_page(sg, page, len, off); + + off = 0; + buf += len; + buf_len -= len; + } + WARN_ON(buf_len != 0); + + return 0; +} + +static void teardown_sgtable(struct sg_table *sgt) +{ + if (sgt->orig_nents > 1) + sg_free_table(sgt); +} + static int ceph_aes_encrypt(const void *key, int key_len, void *dst, size_t *dst_len, const void *src, size_t src_len) { - struct scatterlist sg_in[2], sg_out[1]; + struct scatterlist sg_in[2], prealloc_sg; + struct sg_table sg_out; struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 }; int ret; @@ -110,16 +181,18 @@ static int ceph_aes_encrypt(const void *key, int key_len, *dst_len = src_len + zero_padding; - crypto_blkcipher_setkey((void *)tfm, key, key_len); sg_init_table(sg_in, 2); sg_set_buf(&sg_in[0], src, src_len); sg_set_buf(&sg_in[1], pad, zero_padding); - sg_init_table(sg_out, 1); - sg_set_buf(sg_out, dst, *dst_len); + ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len); + if (ret) + goto out_tfm; + + crypto_blkcipher_setkey((void *)tfm, key, key_len); iv = crypto_blkcipher_crt(tfm)->iv; ivsize = crypto_blkcipher_ivsize(tfm); - memcpy(iv, aes_iv, ivsize); + /* print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1, key, key_len, 1); @@ -128,16 +201,22 @@ static int ceph_aes_encrypt(const void *key, int key_len, print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1, pad, zero_padding, 1); */ - ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, + ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in, src_len + zero_padding); - crypto_free_blkcipher(tfm); - if (ret < 0) + if (ret < 0) { pr_err("ceph_aes_crypt failed %d\n", ret); + goto out_sg; + } /* print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1, dst, *dst_len, 1); */ - return 0; + +out_sg: + teardown_sgtable(&sg_out); +out_tfm: + crypto_free_blkcipher(tfm); + return ret; } static int ceph_aes_encrypt2(const void *key, int key_len, void *dst, @@ -145,7 +224,8 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst, const void *src1, size_t src1_len, const void *src2, size_t src2_len) { - struct scatterlist sg_in[3], sg_out[1]; + 
struct scatterlist sg_in[3], prealloc_sg; + struct sg_table sg_out; struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 }; int ret; @@ -161,17 +241,19 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst, *dst_len = src1_len + src2_len + zero_padding; - crypto_blkcipher_setkey((void *)tfm, key, key_len); sg_init_table(sg_in, 3); sg_set_buf(&sg_in[0], src1, src1_len); sg_set_buf(&sg_in[1], src2, src2_len); sg_set_buf(&sg_in[2], pad, zero_padding); - sg_init_table(sg_out, 1); - sg_set_buf(sg_out, dst, *dst_len); + ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len); + if (ret) + goto out_tfm; + + crypto_blkcipher_setkey((void *)tfm, key, key_len); iv = crypto_blkcipher_crt(tfm)->iv; ivsize = crypto_blkcipher_ivsize(tfm); - memcpy(iv, aes_iv, ivsize); + /* print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1, key, key_len, 1); @@ -182,23 +264,30 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst, print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1, pad, zero_padding, 1); */ - ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, + ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in, src1_len + src2_len + zero_padding); - crypto_free_blkcipher(tfm); - if (ret < 0) + if (ret < 0) { pr_err("ceph_aes_crypt2 failed %d\n", ret); + goto out_sg; + } /* print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1, dst, *dst_len, 1); */ - return 0; + +out_sg: + teardown_sgtable(&sg_out); +out_tfm: + crypto_free_blkcipher(tfm); + return ret; } static int ceph_aes_decrypt(const void *key, int key_len, void *dst, size_t *dst_len, const void *src, size_t src_len) { - struct scatterlist sg_in[1], sg_out[2]; + struct sg_table sg_in; + struct scatterlist sg_out[2], prealloc_sg; struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); struct blkcipher_desc desc = { .tfm = tfm }; char pad[16]; @@ -210,16 +299,16 @@ static int ceph_aes_decrypt(const void *key, int key_len, if (IS_ERR(tfm)) return PTR_ERR(tfm); - crypto_blkcipher_setkey((void *)tfm, key, key_len); - sg_init_table(sg_in, 1); sg_init_table(sg_out, 2); - sg_set_buf(sg_in, src, src_len); sg_set_buf(&sg_out[0], dst, *dst_len); sg_set_buf(&sg_out[1], pad, sizeof(pad)); + ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len); + if (ret) + goto out_tfm; + crypto_blkcipher_setkey((void *)tfm, key, key_len); iv = crypto_blkcipher_crt(tfm)->iv; ivsize = crypto_blkcipher_ivsize(tfm); - memcpy(iv, aes_iv, ivsize); /* @@ -228,12 +317,10 @@ static int ceph_aes_decrypt(const void *key, int key_len, print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1, src, src_len, 1); */ - - ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len); - crypto_free_blkcipher(tfm); + ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len); if (ret < 0) { pr_err("ceph_aes_decrypt failed %d\n", ret); - return ret; + goto out_sg; } if (src_len <= *dst_len) @@ -251,7 +338,12 @@ static int ceph_aes_decrypt(const void *key, int key_len, print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1, dst, *dst_len, 1); */ - return 0; + +out_sg: + teardown_sgtable(&sg_in); +out_tfm: + crypto_free_blkcipher(tfm); + return ret; } static int ceph_aes_decrypt2(const void *key, int key_len, @@ -259,7 +351,8 @@ static int ceph_aes_decrypt2(const void *key, int key_len, void *dst2, size_t *dst2_len, const void *src, size_t src_len) { - struct scatterlist sg_in[1], sg_out[3]; + struct sg_table sg_in; + struct scatterlist sg_out[3], prealloc_sg; 
struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); struct blkcipher_desc desc = { .tfm = tfm }; char pad[16]; @@ -271,17 +364,17 @@ static int ceph_aes_decrypt2(const void *key, int key_len, if (IS_ERR(tfm)) return PTR_ERR(tfm); - sg_init_table(sg_in, 1); - sg_set_buf(sg_in, src, src_len); sg_init_table(sg_out, 3); sg_set_buf(&sg_out[0], dst1, *dst1_len); sg_set_buf(&sg_out[1], dst2, *dst2_len); sg_set_buf(&sg_out[2], pad, sizeof(pad)); + ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len); + if (ret) + goto out_tfm; crypto_blkcipher_setkey((void *)tfm, key, key_len); iv = crypto_blkcipher_crt(tfm)->iv; ivsize = crypto_blkcipher_ivsize(tfm); - memcpy(iv, aes_iv, ivsize); /* @@ -290,12 +383,10 @@ static int ceph_aes_decrypt2(const void *key, int key_len, print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1, src, src_len, 1); */ - - ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len); - crypto_free_blkcipher(tfm); + ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len); if (ret < 0) { pr_err("ceph_aes_decrypt failed %d\n", ret); - return ret; + goto out_sg; } if (src_len <= *dst1_len) @@ -325,7 +416,11 @@ static int ceph_aes_decrypt2(const void *key, int key_len, dst2, *dst2_len, 1); */ - return 0; +out_sg: + teardown_sgtable(&sg_in); +out_tfm: + crypto_free_blkcipher(tfm); + return ret; } diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 559c9f619c20..8d1653caffdb 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -484,7 +484,7 @@ static int ceph_tcp_connect(struct ceph_connection *con) IPPROTO_TCP, &sock); if (ret) return ret; - sock->sk->sk_allocation = GFP_NOFS; + sock->sk->sk_allocation = GFP_NOFS | __GFP_MEMALLOC; #ifdef CONFIG_LOCKDEP lockdep_set_class(&sock->sk->sk_lock, &socket_class); @@ -509,6 +509,9 @@ static int ceph_tcp_connect(struct ceph_connection *con) return ret; } + + sk_set_memalloc(sock->sk); + con->sock = sock; return 0; } @@ -2769,8 +2772,11 @@ static void con_work(struct work_struct *work) { struct ceph_connection *con = container_of(work, struct ceph_connection, work.work); + unsigned long pflags = current->flags; bool fault; + current->flags |= PF_MEMALLOC; + mutex_lock(&con->mutex); while (true) { int ret; @@ -2824,6 +2830,8 @@ static void con_work(struct work_struct *work) con_fault_finish(con); con->ops->put(con); + + tsk_restore_flags(current, pflags, PF_MEMALLOC); } /* diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index f3fc54eac09d..6f164289bde8 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1007,8 +1007,8 @@ static void put_osd(struct ceph_osd *osd) static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) { dout("__remove_osd %p\n", osd); - BUG_ON(!list_empty(&osd->o_requests)); - BUG_ON(!list_empty(&osd->o_linger_requests)); + WARN_ON(!list_empty(&osd->o_requests)); + WARN_ON(!list_empty(&osd->o_linger_requests)); rb_erase(&osd->o_node, &osdc->osds); list_del_init(&osd->o_osd_lru); @@ -1254,6 +1254,8 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc, if (list_empty(&req->r_osd_item)) req->r_osd = NULL; } + + list_del_init(&req->r_req_lru_item); /* can be on notarget */ ceph_osdc_put_request(req); } @@ -1395,6 +1397,7 @@ static int __map_request(struct ceph_osd_client *osdc, if (req->r_osd) { __cancel_request(req); list_del_init(&req->r_osd_item); + list_del_init(&req->r_linger_osd_item); req->r_osd = NULL; } diff --git a/net/compat.c b/net/compat.c index bc8aeefddf3f..062f157d2a6b 100644 --- a/net/compat.c +++ 
b/net/compat.c @@ -31,41 +31,18 @@ #include <asm/uaccess.h> #include <net/compat.h> -static inline int iov_from_user_compat_to_kern(struct iovec *kiov, - struct compat_iovec __user *uiov32, - int niov) +ssize_t get_compat_msghdr(struct msghdr *kmsg, + struct compat_msghdr __user *umsg, + struct sockaddr __user **save_addr, + struct iovec **iov) { - int tot_len = 0; - - while (niov > 0) { - compat_uptr_t buf; - compat_size_t len; - - if (get_user(len, &uiov32->iov_len) || - get_user(buf, &uiov32->iov_base)) - return -EFAULT; - - if (len > INT_MAX - tot_len) - len = INT_MAX - tot_len; - - tot_len += len; - kiov->iov_base = compat_ptr(buf); - kiov->iov_len = (__kernel_size_t) len; - uiov32++; - kiov++; - niov--; - } - return tot_len; -} - -int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg) -{ - compat_uptr_t tmp1, tmp2, tmp3; + compat_uptr_t uaddr, uiov, tmp3; + ssize_t err; if (!access_ok(VERIFY_READ, umsg, sizeof(*umsg)) || - __get_user(tmp1, &umsg->msg_name) || + __get_user(uaddr, &umsg->msg_name) || __get_user(kmsg->msg_namelen, &umsg->msg_namelen) || - __get_user(tmp2, &umsg->msg_iov) || + __get_user(uiov, &umsg->msg_iov) || __get_user(kmsg->msg_iovlen, &umsg->msg_iovlen) || __get_user(tmp3, &umsg->msg_control) || __get_user(kmsg->msg_controllen, &umsg->msg_controllen) || @@ -73,39 +50,33 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg) return -EFAULT; if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) kmsg->msg_namelen = sizeof(struct sockaddr_storage); - kmsg->msg_name = compat_ptr(tmp1); - kmsg->msg_iov = compat_ptr(tmp2); kmsg->msg_control = compat_ptr(tmp3); - return 0; -} -/* I've named the args so it is easy to tell whose space the pointers are in. */ -int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov, - struct sockaddr_storage *kern_address, int mode) -{ - int tot_len; + if (save_addr) + *save_addr = compat_ptr(uaddr); - if (kern_msg->msg_name && kern_msg->msg_namelen) { - if (mode == VERIFY_READ) { - int err = move_addr_to_kernel(kern_msg->msg_name, - kern_msg->msg_namelen, - kern_address); + if (uaddr && kmsg->msg_namelen) { + if (!save_addr) { + err = move_addr_to_kernel(compat_ptr(uaddr), + kmsg->msg_namelen, + kmsg->msg_name); if (err < 0) return err; } - kern_msg->msg_name = kern_address; } else { - kern_msg->msg_name = NULL; - kern_msg->msg_namelen = 0; + kmsg->msg_name = NULL; + kmsg->msg_namelen = 0; } - tot_len = iov_from_user_compat_to_kern(kern_iov, - (struct compat_iovec __user *)kern_msg->msg_iov, - kern_msg->msg_iovlen); - if (tot_len >= 0) - kern_msg->msg_iov = kern_iov; + if (kmsg->msg_iovlen > UIO_MAXIOV) + return -EMSGSIZE; - return tot_len; + err = compat_rw_copy_check_uvector(save_addr ? READ : WRITE, + compat_ptr(uiov), kmsg->msg_iovlen, + UIO_FASTIOV, *iov, iov); + if (err >= 0) + kmsg->msg_iov = *iov; + return err; } /* Bleech... 
*/ @@ -740,7 +711,7 @@ COMPAT_SYSCALL_DEFINE3(sendmsg, int, fd, struct compat_msghdr __user *, msg, uns { if (flags & MSG_CMSG_COMPAT) return -EINVAL; - return __sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); + return __sys_sendmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT); } COMPAT_SYSCALL_DEFINE4(sendmmsg, int, fd, struct compat_mmsghdr __user *, mmsg, @@ -756,7 +727,7 @@ COMPAT_SYSCALL_DEFINE3(recvmsg, int, fd, struct compat_msghdr __user *, msg, uns { if (flags & MSG_CMSG_COMPAT) return -EINVAL; - return __sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); + return __sys_recvmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT); } COMPAT_SYSCALL_DEFINE4(recv, int, fd, void __user *, buf, compat_size_t, len, unsigned int, flags) diff --git a/net/core/datagram.c b/net/core/datagram.c index fdbc9a81d4c2..26391a3fe3e5 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -49,6 +49,7 @@ #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/pagemap.h> +#include <linux/uio.h> #include <net/protocol.h> #include <linux/skbuff.h> @@ -393,34 +394,30 @@ fault: EXPORT_SYMBOL(skb_copy_datagram_iovec); /** - * skb_copy_datagram_const_iovec - Copy a datagram to an iovec. + * skb_copy_datagram_iter - Copy a datagram to an iovec iterator. * @skb: buffer to copy * @offset: offset in the buffer to start copying from - * @to: io vector to copy to - * @to_offset: offset in the io vector to start copying to + * @to: iovec iterator to copy to * @len: amount of data to copy from buffer to iovec - * - * Returns 0 or -EFAULT. - * Note: the iovec is not modified during the copy. */ -int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset, - const struct iovec *to, int to_offset, - int len) +int skb_copy_datagram_iter(const struct sk_buff *skb, int offset, + struct iov_iter *to, int len) { int start = skb_headlen(skb); int i, copy = start - offset; struct sk_buff *frag_iter; + trace_skb_copy_datagram_iovec(skb, len); + /* Copy header. */ if (copy > 0) { if (copy > len) copy = len; - if (memcpy_toiovecend(to, skb->data + offset, to_offset, copy)) - goto fault; + if (copy_to_iter(skb->data + offset, copy, to) != copy) + goto short_copy; if ((len -= copy) == 0) return 0; offset += copy; - to_offset += copy; } /* Copy paged appendix. Hmm... why does this look so complicated? 
*/ @@ -432,22 +429,15 @@ int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset, end = start + skb_frag_size(frag); if ((copy = end - offset) > 0) { - int err; - u8 *vaddr; - struct page *page = skb_frag_page(frag); - if (copy > len) copy = len; - vaddr = kmap(page); - err = memcpy_toiovecend(to, vaddr + frag->page_offset + - offset - start, to_offset, copy); - kunmap(page); - if (err) - goto fault; + if (copy_page_to_iter(skb_frag_page(frag), + frag->page_offset + offset - + start, copy, to) != copy) + goto short_copy; if (!(len -= copy)) return 0; offset += copy; - to_offset += copy; } start = end; } @@ -461,25 +451,33 @@ int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset, if ((copy = end - offset) > 0) { if (copy > len) copy = len; - if (skb_copy_datagram_const_iovec(frag_iter, - offset - start, - to, to_offset, - copy)) + if (skb_copy_datagram_iter(frag_iter, offset - start, + to, copy)) goto fault; if ((len -= copy) == 0) return 0; offset += copy; - to_offset += copy; } start = end; } if (!len) return 0; + /* This is not really a user copy fault, but rather someone + * gave us a bogus length on the skb. We should probably + * print a warning here as it may indicate a kernel bug. + */ + fault: return -EFAULT; + +short_copy: + if (iov_iter_count(to)) + goto fault; + + return 0; } -EXPORT_SYMBOL(skb_copy_datagram_const_iovec); +EXPORT_SYMBOL(skb_copy_datagram_iter); /** * skb_copy_datagram_from_iovec - Copy a datagram from an iovec. diff --git a/net/core/dev.c b/net/core/dev.c index b793e3521a36..ac4836241a96 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -118,6 +118,7 @@ #include <linux/if_vlan.h> #include <linux/ip.h> #include <net/ip.h> +#include <net/mpls.h> #include <linux/ipv6.h> #include <linux/in.h> #include <linux/jhash.h> @@ -133,6 +134,7 @@ #include <linux/vmalloc.h> #include <linux/if_macvlan.h> #include <linux/errqueue.h> +#include <linux/hrtimer.h> #include "net-sysfs.h" @@ -1435,22 +1437,17 @@ EXPORT_SYMBOL(dev_close); */ void dev_disable_lro(struct net_device *dev) { - /* - * If we're trying to disable lro on a vlan device - * use the underlying physical device instead - */ - if (is_vlan_dev(dev)) - dev = vlan_dev_real_dev(dev); - - /* the same for macvlan devices */ - if (netif_is_macvlan(dev)) - dev = macvlan_dev_real_dev(dev); + struct net_device *lower_dev; + struct list_head *iter; dev->wanted_features &= ~NETIF_F_LRO; netdev_update_features(dev); if (unlikely(dev->features & NETIF_F_LRO)) netdev_WARN(dev, "failed to disable LRO!\n"); + + netdev_for_each_lower_dev(dev, lower_dev, iter) + dev_disable_lro(lower_dev); } EXPORT_SYMBOL(dev_disable_lro); @@ -2530,7 +2527,7 @@ static netdev_features_t net_mpls_features(struct sk_buff *skb, netdev_features_t features, __be16 type) { - if (type == htons(ETH_P_MPLS_UC) || type == htons(ETH_P_MPLS_MC)) + if (eth_p_mpls(type)) features &= skb->dev->mpls_features; return features; @@ -2647,12 +2644,8 @@ static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb, netdev_features_t features) { if (vlan_tx_tag_present(skb) && - !vlan_hw_offload_capable(features, skb->vlan_proto)) { - skb = __vlan_put_tag(skb, skb->vlan_proto, - vlan_tx_tag_get(skb)); - if (skb) - skb->vlan_tci = 0; - } + !vlan_hw_offload_capable(features, skb->vlan_proto)) + skb = __vlan_hwaccel_push_inside(skb); return skb; } @@ -4157,6 +4150,10 @@ EXPORT_SYMBOL(napi_gro_receive); static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) { + if (unlikely(skb->pfmemalloc)) { + consume_skb(skb); + return; 
+ } __skb_pull(skb, skb_headlen(skb)); /* restore the reserve we had after netdev_alloc_skb_ip_align() */ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); @@ -4312,20 +4309,28 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd) local_irq_enable(); } +static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) +{ +#ifdef CONFIG_RPS + return sd->rps_ipi_list != NULL; +#else + return false; +#endif +} + static int process_backlog(struct napi_struct *napi, int quota) { int work = 0; struct softnet_data *sd = container_of(napi, struct softnet_data, backlog); -#ifdef CONFIG_RPS /* Check if we have pending ipi, its better to send them now, * not waiting net_rx_action() end. */ - if (sd->rps_ipi_list) { + if (sd_has_rps_ipi_waiting(sd)) { local_irq_disable(); net_rps_action_and_irq_enable(sd); } -#endif + napi->weight = weight_p; local_irq_disable(); while (1) { @@ -4352,7 +4357,6 @@ static int process_backlog(struct napi_struct *napi, int quota) * We can use a plain write instead of clear_bit(), * and we dont need an smp_mb() memory barrier. */ - list_del(&napi->poll_list); napi->state = 0; rps_unlock(sd); @@ -4372,7 +4376,8 @@ static int process_backlog(struct napi_struct *napi, int quota) * __napi_schedule - schedule for receive * @n: entry to schedule * - * The entry's receive function will be scheduled to run + * The entry's receive function will be scheduled to run. + * Consider using __napi_schedule_irqoff() if hard irqs are masked. */ void __napi_schedule(struct napi_struct *n) { @@ -4384,18 +4389,29 @@ void __napi_schedule(struct napi_struct *n) } EXPORT_SYMBOL(__napi_schedule); +/** + * __napi_schedule_irqoff - schedule for receive + * @n: entry to schedule + * + * Variant of __napi_schedule() assuming hard irqs are masked + */ +void __napi_schedule_irqoff(struct napi_struct *n) +{ + ____napi_schedule(this_cpu_ptr(&softnet_data), n); +} +EXPORT_SYMBOL(__napi_schedule_irqoff); + void __napi_complete(struct napi_struct *n) { BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); - BUG_ON(n->gro_list); - list_del(&n->poll_list); + list_del_init(&n->poll_list); smp_mb__before_atomic(); clear_bit(NAPI_STATE_SCHED, &n->state); } EXPORT_SYMBOL(__napi_complete); -void napi_complete(struct napi_struct *n) +void napi_complete_done(struct napi_struct *n, int work_done) { unsigned long flags; @@ -4406,12 +4422,28 @@ void napi_complete(struct napi_struct *n) if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state))) return; - napi_gro_flush(n, false); - local_irq_save(flags); - __napi_complete(n); - local_irq_restore(flags); + if (n->gro_list) { + unsigned long timeout = 0; + + if (work_done) + timeout = n->dev->gro_flush_timeout; + + if (timeout) + hrtimer_start(&n->timer, ns_to_ktime(timeout), + HRTIMER_MODE_REL_PINNED); + else + napi_gro_flush(n, false); + } + if (likely(list_empty(&n->poll_list))) { + WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state)); + } else { + /* If n->poll_list is not empty, we need to mask irqs */ + local_irq_save(flags); + __napi_complete(n); + local_irq_restore(flags); + } } -EXPORT_SYMBOL(napi_complete); +EXPORT_SYMBOL(napi_complete_done); /* must be called under rcu_read_lock(), as we dont take a reference */ struct napi_struct *napi_by_id(unsigned int napi_id) @@ -4465,10 +4497,23 @@ void napi_hash_del(struct napi_struct *napi) } EXPORT_SYMBOL_GPL(napi_hash_del); +static enum hrtimer_restart napi_watchdog(struct hrtimer *timer) +{ + struct napi_struct *napi; + + napi = container_of(timer, struct napi_struct, timer); + if 
(napi->gro_list) + napi_schedule(napi); + + return HRTIMER_NORESTART; +} + void netif_napi_add(struct net_device *dev, struct napi_struct *napi, int (*poll)(struct napi_struct *, int), int weight) { INIT_LIST_HEAD(&napi->poll_list); + hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); + napi->timer.function = napi_watchdog; napi->gro_count = 0; napi->gro_list = NULL; napi->skb = NULL; @@ -4487,6 +4532,20 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi, } EXPORT_SYMBOL(netif_napi_add); +void napi_disable(struct napi_struct *n) +{ + might_sleep(); + set_bit(NAPI_STATE_DISABLE, &n->state); + + while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) + msleep(1); + + hrtimer_cancel(&n->timer); + + clear_bit(NAPI_STATE_DISABLE, &n->state); +} +EXPORT_SYMBOL(napi_disable); + void netif_napi_del(struct napi_struct *napi) { list_del_init(&napi->dev_list); @@ -4503,29 +4562,28 @@ static void net_rx_action(struct softirq_action *h) struct softnet_data *sd = this_cpu_ptr(&softnet_data); unsigned long time_limit = jiffies + 2; int budget = netdev_budget; + LIST_HEAD(list); + LIST_HEAD(repoll); void *have; local_irq_disable(); + list_splice_init(&sd->poll_list, &list); + local_irq_enable(); - while (!list_empty(&sd->poll_list)) { + while (!list_empty(&list)) { struct napi_struct *n; int work, weight; - /* If softirq window is exhuasted then punt. + /* If softirq window is exhausted then punt. * Allow this to run for 2 jiffies since which will allow * an average latency of 1.5/HZ. */ if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit))) goto softnet_break; - local_irq_enable(); - /* Even though interrupts have been re-enabled, this - * access is safe because interrupts can only add new - * entries to the tail of this list, and only ->poll() - * calls can remove this head entry from the list. - */ - n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list); + n = list_first_entry(&list, struct napi_struct, poll_list); + list_del_init(&n->poll_list); have = netpoll_poll_lock(n); @@ -4547,8 +4605,6 @@ static void net_rx_action(struct softirq_action *h) budget -= work; - local_irq_disable(); - /* Drivers must not modify the NAPI state if they * consume the entire weight. In such cases this code * still "owns" the NAPI instance and therefore can @@ -4556,32 +4612,40 @@ static void net_rx_action(struct softirq_action *h) */ if (unlikely(work == weight)) { if (unlikely(napi_disable_pending(n))) { - local_irq_enable(); napi_complete(n); - local_irq_disable(); } else { if (n->gro_list) { /* flush too old packets * If HZ < 1000, flush all packets. 
*/ - local_irq_enable(); napi_gro_flush(n, HZ >= 1000); - local_irq_disable(); } - list_move_tail(&n->poll_list, &sd->poll_list); + list_add_tail(&n->poll_list, &repoll); } } netpoll_poll_unlock(have); } + + if (!sd_has_rps_ipi_waiting(sd) && + list_empty(&list) && + list_empty(&repoll)) + return; out: + local_irq_disable(); + + list_splice_tail_init(&sd->poll_list, &list); + list_splice_tail(&repoll, &list); + list_splice(&list, &sd->poll_list); + if (!list_empty(&sd->poll_list)) + __raise_softirq_irqoff(NET_RX_SOFTIRQ); + net_rps_action_and_irq_enable(sd); return; softnet_break: sd->time_squeeze++; - __raise_softirq_irqoff(NET_RX_SOFTIRQ); goto out; } diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index b6b230600b97..c0548d268e1a 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c @@ -278,8 +278,8 @@ int __hw_addr_sync_dev(struct netdev_hw_addr_list *list, EXPORT_SYMBOL(__hw_addr_sync_dev); /** - * __hw_addr_unsync_dev - Remove synchonized addresses from device - * @list: address list to remove syncronized addresses from + * __hw_addr_unsync_dev - Remove synchronized addresses from device + * @list: address list to remove synchronized addresses from * @dev: device to sync * @unsync: function to call if address should be removed * diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index 72e899a3efda..b94b1d293506 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -142,10 +142,12 @@ static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cm case SIOCGIFHWADDR: if (!dev->addr_len) - memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data); + memset(ifr->ifr_hwaddr.sa_data, 0, + sizeof(ifr->ifr_hwaddr.sa_data)); else memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr, - min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); + min(sizeof(ifr->ifr_hwaddr.sa_data), + (size_t)dev->addr_len)); ifr->ifr_hwaddr.sa_family = dev->type; return 0; @@ -265,7 +267,8 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) if (ifr->ifr_hwaddr.sa_family != dev->type) return -EINVAL; memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, - min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len)); + min(sizeof(ifr->ifr_hwaddr.sa_data), + (size_t)dev->addr_len)); call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); return 0; diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 1600aa24d36b..715f51f321e9 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -25,6 +25,7 @@ #include <linux/slab.h> #include <linux/rtnetlink.h> #include <linux/sched.h> +#include <linux/net.h> /* * Some useful ethtool_ops methods that're device independent. 
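The net/core/dev.c changes above introduce two new driver-facing NAPI entry points: __napi_schedule_irqoff() for contexts where hard irqs are already masked, and napi_complete_done(), which can defer the GRO flush via the per-device gro_flush_timeout hrtimer instead of flushing on every poll. A sketch of how a driver would use them together, assuming a hypothetical my_nic driver and helpers:

static irqreturn_t my_nic_irq(int irq, void *data)
{
        struct my_nic *nic = data;

        my_nic_mask_irqs(nic);                  /* hypothetical helper */
        __napi_schedule_irqoff(&nic->napi);     /* hard irqs are masked here */
        return IRQ_HANDLED;
}

static int my_nic_poll(struct napi_struct *napi, int budget)
{
        struct my_nic *nic = container_of(napi, struct my_nic, napi);
        int work = my_nic_clean_rx(nic, budget);        /* hypothetical */

        if (work < budget) {
                /* With gro_flush_timeout set on the netdev, this arms
                 * the napi_watchdog hrtimer rather than flushing GRO
                 * immediately. */
                napi_complete_done(napi, work);
                my_nic_unmask_irqs(nic);
        }
        return work;
}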
@@ -84,7 +85,6 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] [NETIF_F_GSO_IPIP_BIT] = "tx-ipip-segmentation", [NETIF_F_GSO_SIT_BIT] = "tx-sit-segmentation", [NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation", - [NETIF_F_GSO_MPLS_BIT] = "tx-mpls-segmentation", [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc", [NETIF_F_SCTP_CSUM_BIT] = "tx-checksum-sctp", @@ -574,6 +574,16 @@ static int ethtool_copy_validate_indir(u32 *indir, void __user *useraddr, return 0; } +u8 netdev_rss_key[NETDEV_RSS_KEY_LEN]; + +void netdev_rss_key_fill(void *buffer, size_t len) +{ + BUG_ON(len > sizeof(netdev_rss_key)); + net_get_random_once(netdev_rss_key, sizeof(netdev_rss_key)); + memcpy(buffer, netdev_rss_key, len); +} +EXPORT_SYMBOL(netdev_rss_key_fill); + static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev, void __user *useraddr) { @@ -1036,7 +1046,8 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr) { const struct ethtool_ops *ops = dev->ethtool_ops; - if (!ops->get_eeprom || !ops->get_eeprom_len) + if (!ops->get_eeprom || !ops->get_eeprom_len || + !ops->get_eeprom_len(dev)) return -EOPNOTSUPP; return ethtool_get_any_eeprom(dev, useraddr, ops->get_eeprom, @@ -1052,7 +1063,8 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr) u8 *data; int ret = 0; - if (!ops->set_eeprom || !ops->get_eeprom_len) + if (!ops->set_eeprom || !ops->get_eeprom_len || + !ops->get_eeprom_len(dev)) return -EOPNOTSUPP; if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) diff --git a/net/core/iovec.c b/net/core/iovec.c index e1ec45ab1e63..dcbe98b3726a 100644 --- a/net/core/iovec.c +++ b/net/core/iovec.c @@ -28,53 +28,6 @@ #include <net/sock.h> /* - * Verify iovec. The caller must ensure that the iovec is big enough - * to hold the message iovec. - * - * Save time not doing access_ok. copy_*_user will make this work - * in any case. 
- */ - -int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode) -{ - int size, ct, err; - - if (m->msg_name && m->msg_namelen) { - if (mode == VERIFY_READ) { - void __user *namep; - namep = (void __user __force *) m->msg_name; - err = move_addr_to_kernel(namep, m->msg_namelen, - address); - if (err < 0) - return err; - } - m->msg_name = address; - } else { - m->msg_name = NULL; - m->msg_namelen = 0; - } - - size = m->msg_iovlen * sizeof(struct iovec); - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size)) - return -EFAULT; - - m->msg_iov = iov; - err = 0; - - for (ct = 0; ct < m->msg_iovlen; ct++) { - size_t len = iov[ct].iov_len; - - if (len > INT_MAX - err) { - len = INT_MAX - err; - iov[ct].iov_len = len; - } - err += len; - } - - return err; -} - -/* * And now for the all-in-one: copy and checksum from a user iovec * directly to a datagram * Calls to csum_partial but the last must be in 32 bit chunks diff --git a/net/core/link_watch.c b/net/core/link_watch.c index bd0767e6b2b3..49a9e3e06c08 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c @@ -21,7 +21,7 @@ #include <linux/spinlock.h> #include <linux/workqueue.h> #include <linux/bitops.h> -#include <asm/types.h> +#include <linux/types.h> enum lw_bits { diff --git a/net/core/neighbour.c b/net/core/neighbour.c index ef31fef25e5a..8e38f17288d3 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -56,7 +56,6 @@ static void __neigh_notify(struct neighbour *n, int type, int flags); static void neigh_update_notify(struct neighbour *neigh); static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev); -static struct neigh_table *neigh_tables; #ifdef CONFIG_PROC_FS static const struct file_operations neigh_stat_seq_fops; #endif @@ -87,13 +86,8 @@ static const struct file_operations neigh_stat_seq_fops; the most complicated procedure, which we allow is dev->hard_header. It is supposed, that dev->hard_header is simplistic and does not make callbacks to neighbour tables. - - The last lock is neigh_tbl_lock. It is pure SMP lock, protecting - list of neighbour tables. 
This list is used only in process context, */ -static DEFINE_RWLOCK(neigh_tbl_lock); - static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb) { kfree_skb(skb); @@ -773,7 +767,7 @@ static void neigh_periodic_work(struct work_struct *work) if (time_after(jiffies, tbl->last_rand + 300 * HZ)) { struct neigh_parms *p; tbl->last_rand = jiffies; - for (p = &tbl->parms; p; p = p->next) + list_for_each_entry(p, &tbl->parms_list, list) p->reachable_time = neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); } @@ -1446,7 +1440,7 @@ static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl, { struct neigh_parms *p; - for (p = &tbl->parms; p; p = p->next) { + list_for_each_entry(p, &tbl->parms_list, list) { if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) || (!p->dev && !ifindex && net_eq(net, &init_net))) return p; @@ -1481,8 +1475,7 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev, } write_lock_bh(&tbl->lock); - p->next = tbl->parms.next; - tbl->parms.next = p; + list_add(&p->list, &tbl->parms.list); write_unlock_bh(&tbl->lock); neigh_parms_data_state_cleanall(p); @@ -1501,24 +1494,15 @@ static void neigh_rcu_free_parms(struct rcu_head *head) void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms) { - struct neigh_parms **p; - if (!parms || parms == &tbl->parms) return; write_lock_bh(&tbl->lock); - for (p = &tbl->parms.next; *p; p = &(*p)->next) { - if (*p == parms) { - *p = parms->next; - parms->dead = 1; - write_unlock_bh(&tbl->lock); - if (parms->dev) - dev_put(parms->dev); - call_rcu(&parms->rcu_head, neigh_rcu_free_parms); - return; - } - } + list_del(&parms->list); + parms->dead = 1; write_unlock_bh(&tbl->lock); - neigh_dbg(1, "%s: not found\n", __func__); + if (parms->dev) + dev_put(parms->dev); + call_rcu(&parms->rcu_head, neigh_rcu_free_parms); } EXPORT_SYMBOL(neigh_parms_release); @@ -1530,11 +1514,15 @@ static void neigh_parms_destroy(struct neigh_parms *parms) static struct lock_class_key neigh_table_proxy_queue_class; -static void neigh_table_init_no_netlink(struct neigh_table *tbl) +static struct neigh_table *neigh_tables[NEIGH_NR_TABLES] __read_mostly; + +void neigh_table_init(int index, struct neigh_table *tbl) { unsigned long now = jiffies; unsigned long phsize; + INIT_LIST_HEAD(&tbl->parms_list); + list_add(&tbl->parms.list, &tbl->parms_list); write_pnet(&tbl->parms.net, &init_net); atomic_set(&tbl->parms.refcnt, 1); tbl->parms.reachable_time = @@ -1574,34 +1562,14 @@ static void neigh_table_init_no_netlink(struct neigh_table *tbl) tbl->last_flush = now; tbl->last_rand = now + tbl->parms.reachable_time * 20; -} - -void neigh_table_init(struct neigh_table *tbl) -{ - struct neigh_table *tmp; - - neigh_table_init_no_netlink(tbl); - write_lock(&neigh_tbl_lock); - for (tmp = neigh_tables; tmp; tmp = tmp->next) { - if (tmp->family == tbl->family) - break; - } - tbl->next = neigh_tables; - neigh_tables = tbl; - write_unlock(&neigh_tbl_lock); - if (unlikely(tmp)) { - pr_err("Registering multiple tables for family %d\n", - tbl->family); - dump_stack(); - } + neigh_tables[index] = tbl; } EXPORT_SYMBOL(neigh_table_init); -int neigh_table_clear(struct neigh_table *tbl) +int neigh_table_clear(int index, struct neigh_table *tbl) { - struct neigh_table **tp; - + neigh_tables[index] = NULL; /* It is not clean... 
Fix it to unload IPv6 module safely */ cancel_delayed_work_sync(&tbl->gc_work); del_timer_sync(&tbl->proxy_timer); @@ -1609,14 +1577,6 @@ int neigh_table_clear(struct neigh_table *tbl) neigh_ifdown(tbl, NULL); if (atomic_read(&tbl->entries)) pr_crit("neighbour leakage\n"); - write_lock(&neigh_tbl_lock); - for (tp = &neigh_tables; *tp; tp = &(*tp)->next) { - if (*tp == tbl) { - *tp = tbl->next; - break; - } - } - write_unlock(&neigh_tbl_lock); call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu, neigh_hash_free_rcu); @@ -1634,12 +1594,32 @@ int neigh_table_clear(struct neigh_table *tbl) } EXPORT_SYMBOL(neigh_table_clear); +static struct neigh_table *neigh_find_table(int family) +{ + struct neigh_table *tbl = NULL; + + switch (family) { + case AF_INET: + tbl = neigh_tables[NEIGH_ARP_TABLE]; + break; + case AF_INET6: + tbl = neigh_tables[NEIGH_ND_TABLE]; + break; + case AF_DECnet: + tbl = neigh_tables[NEIGH_DN_TABLE]; + break; + } + + return tbl; +} + static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); struct ndmsg *ndm; struct nlattr *dst_attr; struct neigh_table *tbl; + struct neighbour *neigh; struct net_device *dev = NULL; int err = -EINVAL; @@ -1660,39 +1640,31 @@ static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh) } } - read_lock(&neigh_tbl_lock); - for (tbl = neigh_tables; tbl; tbl = tbl->next) { - struct neighbour *neigh; + tbl = neigh_find_table(ndm->ndm_family); + if (tbl == NULL) + return -EAFNOSUPPORT; - if (tbl->family != ndm->ndm_family) - continue; - read_unlock(&neigh_tbl_lock); - - if (nla_len(dst_attr) < tbl->key_len) - goto out; - - if (ndm->ndm_flags & NTF_PROXY) { - err = pneigh_delete(tbl, net, nla_data(dst_attr), dev); - goto out; - } + if (nla_len(dst_attr) < tbl->key_len) + goto out; - if (dev == NULL) - goto out; + if (ndm->ndm_flags & NTF_PROXY) { + err = pneigh_delete(tbl, net, nla_data(dst_attr), dev); + goto out; + } - neigh = neigh_lookup(tbl, nla_data(dst_attr), dev); - if (neigh == NULL) { - err = -ENOENT; - goto out; - } + if (dev == NULL) + goto out; - err = neigh_update(neigh, NULL, NUD_FAILED, - NEIGH_UPDATE_F_OVERRIDE | - NEIGH_UPDATE_F_ADMIN); - neigh_release(neigh); + neigh = neigh_lookup(tbl, nla_data(dst_attr), dev); + if (neigh == NULL) { + err = -ENOENT; goto out; } - read_unlock(&neigh_tbl_lock); - err = -EAFNOSUPPORT; + + err = neigh_update(neigh, NULL, NUD_FAILED, + NEIGH_UPDATE_F_OVERRIDE | + NEIGH_UPDATE_F_ADMIN); + neigh_release(neigh); out: return err; @@ -1700,11 +1672,14 @@ out: static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh) { + int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE; struct net *net = sock_net(skb->sk); struct ndmsg *ndm; struct nlattr *tb[NDA_MAX+1]; struct neigh_table *tbl; struct net_device *dev = NULL; + struct neighbour *neigh; + void *dst, *lladdr; int err; ASSERT_RTNL(); @@ -1728,70 +1703,60 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh) goto out; } - read_lock(&neigh_tbl_lock); - for (tbl = neigh_tables; tbl; tbl = tbl->next) { - int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE; - struct neighbour *neigh; - void *dst, *lladdr; + tbl = neigh_find_table(ndm->ndm_family); + if (tbl == NULL) + return -EAFNOSUPPORT; - if (tbl->family != ndm->ndm_family) - continue; - read_unlock(&neigh_tbl_lock); + if (nla_len(tb[NDA_DST]) < tbl->key_len) + goto out; + dst = nla_data(tb[NDA_DST]); + lladdr = tb[NDA_LLADDR] ? 
nla_data(tb[NDA_LLADDR]) : NULL; - if (nla_len(tb[NDA_DST]) < tbl->key_len) - goto out; - dst = nla_data(tb[NDA_DST]); - lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL; + if (ndm->ndm_flags & NTF_PROXY) { + struct pneigh_entry *pn; - if (ndm->ndm_flags & NTF_PROXY) { - struct pneigh_entry *pn; + err = -ENOBUFS; + pn = pneigh_lookup(tbl, net, dst, dev, 1); + if (pn) { + pn->flags = ndm->ndm_flags; + err = 0; + } + goto out; + } - err = -ENOBUFS; - pn = pneigh_lookup(tbl, net, dst, dev, 1); - if (pn) { - pn->flags = ndm->ndm_flags; - err = 0; - } + if (dev == NULL) + goto out; + + neigh = neigh_lookup(tbl, dst, dev); + if (neigh == NULL) { + if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { + err = -ENOENT; goto out; } - if (dev == NULL) + neigh = __neigh_lookup_errno(tbl, dst, dev); + if (IS_ERR(neigh)) { + err = PTR_ERR(neigh); + goto out; + } + } else { + if (nlh->nlmsg_flags & NLM_F_EXCL) { + err = -EEXIST; + neigh_release(neigh); goto out; - - neigh = neigh_lookup(tbl, dst, dev); - if (neigh == NULL) { - if (!(nlh->nlmsg_flags & NLM_F_CREATE)) { - err = -ENOENT; - goto out; - } - - neigh = __neigh_lookup_errno(tbl, dst, dev); - if (IS_ERR(neigh)) { - err = PTR_ERR(neigh); - goto out; - } - } else { - if (nlh->nlmsg_flags & NLM_F_EXCL) { - err = -EEXIST; - neigh_release(neigh); - goto out; - } - - if (!(nlh->nlmsg_flags & NLM_F_REPLACE)) - flags &= ~NEIGH_UPDATE_F_OVERRIDE; } - if (ndm->ndm_flags & NTF_USE) { - neigh_event_send(neigh, NULL); - err = 0; - } else - err = neigh_update(neigh, lladdr, ndm->ndm_state, flags); - neigh_release(neigh); - goto out; + if (!(nlh->nlmsg_flags & NLM_F_REPLACE)) + flags &= ~NEIGH_UPDATE_F_OVERRIDE; } - read_unlock(&neigh_tbl_lock); - err = -EAFNOSUPPORT; + if (ndm->ndm_flags & NTF_USE) { + neigh_event_send(neigh, NULL); + err = 0; + } else + err = neigh_update(neigh, lladdr, ndm->ndm_state, flags); + neigh_release(neigh); + out: return err; } @@ -1990,7 +1955,8 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh) struct neigh_table *tbl; struct ndtmsg *ndtmsg; struct nlattr *tb[NDTA_MAX+1]; - int err; + bool found = false; + int err, tidx; err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX, nl_neightbl_policy); @@ -2003,19 +1969,21 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh) } ndtmsg = nlmsg_data(nlh); - read_lock(&neigh_tbl_lock); - for (tbl = neigh_tables; tbl; tbl = tbl->next) { + + for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) { + tbl = neigh_tables[tidx]; + if (!tbl) + continue; if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family) continue; - - if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) + if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) { + found = true; break; + } } - if (tbl == NULL) { - err = -ENOENT; - goto errout_locked; - } + if (!found) + return -ENOENT; /* * We acquire tbl->lock to be nice to the periodic timers and @@ -2126,8 +2094,6 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh) errout_tbl_lock: write_unlock_bh(&tbl->lock); -errout_locked: - read_unlock(&neigh_tbl_lock); errout: return err; } @@ -2142,10 +2108,13 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb) family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family; - read_lock(&neigh_tbl_lock); - for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) { + for (tidx = 0; tidx < NEIGH_NR_TABLES; tidx++) { struct neigh_parms *p; + tbl = neigh_tables[tidx]; + if (!tbl) + continue; + if (tidx < tbl_skip || (family && tbl->family != family)) continue; @@ -2154,7 
+2123,9 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb) NLM_F_MULTI) <= 0) break; - for (nidx = 0, p = tbl->parms.next; p; p = p->next) { + nidx = 0; + p = list_next_entry(&tbl->parms, list); + list_for_each_entry_from(p, &tbl->parms_list, list) { if (!net_eq(neigh_parms_net(p), net)) continue; @@ -2174,7 +2145,6 @@ static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb) neigh_skip = 0; } out: - read_unlock(&neigh_tbl_lock); cb->args[0] = tidx; cb->args[1] = nidx; @@ -2357,7 +2327,6 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) int proxy = 0; int err; - read_lock(&neigh_tbl_lock); family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family; /* check for full ndmsg structure presence, family member is @@ -2369,8 +2338,11 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) s_t = cb->args[0]; - for (tbl = neigh_tables, t = 0; tbl; - tbl = tbl->next, t++) { + for (t = 0; t < NEIGH_NR_TABLES; t++) { + tbl = neigh_tables[t]; + + if (!tbl) + continue; if (t < s_t || (family && tbl->family != family)) continue; if (t > s_t) @@ -2383,7 +2355,6 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb) if (err < 0) break; } - read_unlock(&neigh_tbl_lock); cb->args[0] = t; return skb->len; diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 9dd06699b09c..1a24602cd54e 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -325,6 +325,23 @@ static ssize_t tx_queue_len_store(struct device *dev, } NETDEVICE_SHOW_RW(tx_queue_len, fmt_ulong); +static int change_gro_flush_timeout(struct net_device *dev, unsigned long val) +{ + dev->gro_flush_timeout = val; + return 0; +} + +static ssize_t gro_flush_timeout_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + return netdev_store(dev, attr, buf, len, change_gro_flush_timeout); +} +NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong); + static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { @@ -422,6 +439,7 @@ static struct attribute *net_class_attrs[] = { &dev_attr_mtu.attr, &dev_attr_flags.attr, &dev_attr_tx_queue_len.attr, + &dev_attr_gro_flush_timeout.attr, &dev_attr_phys_port_id.attr, NULL, }; diff --git a/net/core/netpoll.c b/net/core/netpoll.c index e6645b4f330a..e0ad5d16c9c5 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -79,8 +79,7 @@ static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev, if (vlan_tx_tag_present(skb) && !vlan_hw_offload_capable(features, skb->vlan_proto)) { - skb = __vlan_put_tag(skb, skb->vlan_proto, - vlan_tx_tag_get(skb)); + skb = __vlan_hwaccel_push_inside(skb); if (unlikely(!skb)) { /* This is actually a packet drop, but we * don't want the code that calls this @@ -88,7 +87,6 @@ static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev, */ goto out; } - skb->vlan_tci = 0; } status = netdev_start_xmit(skb, dev, txq, false); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 443256bdcddc..da934fc3faa8 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -3728,8 +3728,7 @@ static int pktgen_remove_device(struct pktgen_thread *t, /* Remove proc before if_list entry, because add_device uses * list to determine if interface already exist, avoid race * with proc_create_data() */ - if (pkt_dev->entry) - proc_remove(pkt_dev->entry); + proc_remove(pkt_dev->entry); /* And 
update the thread if_list */ _rem_dev_from_if_list(t, pkt_dev); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 61059a05ec95..c906c5f4bf69 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3013,7 +3013,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, if (nskb->len == len + doffset) goto perform_csum_check; - if (!sg) { + if (!sg && !nskb->remcsum_offload) { nskb->ip_summed = CHECKSUM_NONE; nskb->csum = skb_copy_and_csum_bits(head_skb, offset, skb_put(nskb, len), @@ -3085,7 +3085,7 @@ skip_fraglist: nskb->truesize += nskb->data_len; perform_csum_check: - if (!csum) { + if (!csum && !nskb->remcsum_offload) { nskb->csum = skb_checksum(nskb, doffset, nskb->len - doffset, 0); nskb->ip_summed = CHECKSUM_NONE; @@ -3099,6 +3099,16 @@ perform_csum_check: * (see validate_xmit_skb_list() for example) */ segs->prev = tail; + + /* Following permits correct backpressure, for protocols + * using skb_set_owner_w(). + * Idea is to tranfert ownership from head_skb to last segment. + */ + if (head_skb->destructor == sock_wfree) { + swap(tail->truesize, head_skb->truesize); + swap(tail->destructor, head_skb->destructor); + swap(tail->sk, head_skb->sk); + } return segs; err: @@ -4070,15 +4080,22 @@ EXPORT_SYMBOL_GPL(skb_scrub_packet); unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) { const struct skb_shared_info *shinfo = skb_shinfo(skb); + unsigned int thlen = 0; - if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) - return tcp_hdrlen(skb) + shinfo->gso_size; + if (skb->encapsulation) { + thlen = skb_inner_transport_header(skb) - + skb_transport_header(skb); + if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) + thlen += inner_tcp_hdrlen(skb); + } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) { + thlen = tcp_hdrlen(skb); + } /* UFO sets gso_size to the size of the fragmentation * payload, i.e. the size of the L4 (UDP) header is already * accounted for. */ - return shinfo->gso_size; + return thlen + shinfo->gso_size; } EXPORT_SYMBOL_GPL(skb_gso_transport_seglen); @@ -4134,6 +4151,113 @@ err_free: } EXPORT_SYMBOL(skb_vlan_untag); +int skb_ensure_writable(struct sk_buff *skb, int write_len) +{ + if (!pskb_may_pull(skb, write_len)) + return -ENOMEM; + + if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) + return 0; + + return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); +} +EXPORT_SYMBOL(skb_ensure_writable); + +/* remove VLAN header from packet and update csum accordingly. 
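The new __skb_vlan_pop() below performs the untag with a single memmove(): the two MAC addresses slide up by VLAN_HLEN so they overwrite the tag, then the head of the buffer is pulled. The same surgery on a flat byte buffer, minus the skb and receive-checksum bookkeeping (a sketch; constants as in the kernel headers):

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN  6
    #define VLAN_HLEN 4

    /* Strip an 802.1Q/802.1ad tag in place.  Returns the new start of
     * the frame (tag gone), or NULL if the frame is not tagged.  The
     * final pointer bump plays the role of __skb_pull(skb, VLAN_HLEN). */
    static uint8_t *vlan_pop_raw(uint8_t *frame, size_t len,
                                 size_t *new_len, uint16_t *tci)
    {
            uint16_t tpid;

            if (len < 2 * ETH_ALEN + VLAN_HLEN + 2)
                    return NULL;

            tpid = ((uint16_t)frame[12] << 8) | frame[13];
            if (tpid != 0x8100 && tpid != 0x88a8)
                    return NULL;

            *tci = ((uint16_t)frame[14] << 8) | frame[15];

            /* slide dst+src MAC over the tag, as __skb_vlan_pop() does */
            memmove(frame + VLAN_HLEN, frame, 2 * ETH_ALEN);
            *new_len = len - VLAN_HLEN;
            return frame + VLAN_HLEN;
    }

    int main(void)
    {
            uint8_t f[18] = { [12] = 0x81, [13] = 0x00,   /* TPID 0x8100 */
                              [14] = 0x20, [15] = 0x05,   /* TCI 0x2005 */
                              [16] = 0x08, [17] = 0x00 }; /* inner IPv4 */
            size_t n;
            uint16_t tci;
            uint8_t *p = vlan_pop_raw(f, sizeof(f), &n, &tci);

            printf("tci %04x ethertype %02x%02x len %zu\n",
                   tci, p[12], p[13], n);
            return 0;
    }

The inner EtherType that the memmove exposes is what vlan_set_encap_proto() then installs as skb->protocol.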
*/ +static int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) +{ + struct vlan_hdr *vhdr; + unsigned int offset = skb->data - skb_mac_header(skb); + int err; + + __skb_push(skb, offset); + err = skb_ensure_writable(skb, VLAN_ETH_HLEN); + if (unlikely(err)) + goto pull; + + skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); + + vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); + *vlan_tci = ntohs(vhdr->h_vlan_TCI); + + memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); + __skb_pull(skb, VLAN_HLEN); + + vlan_set_encap_proto(skb, vhdr); + skb->mac_header += VLAN_HLEN; + + if (skb_network_offset(skb) < ETH_HLEN) + skb_set_network_header(skb, ETH_HLEN); + + skb_reset_mac_len(skb); +pull: + __skb_pull(skb, offset); + + return err; +} + +int skb_vlan_pop(struct sk_buff *skb) +{ + u16 vlan_tci; + __be16 vlan_proto; + int err; + + if (likely(vlan_tx_tag_present(skb))) { + skb->vlan_tci = 0; + } else { + if (unlikely((skb->protocol != htons(ETH_P_8021Q) && + skb->protocol != htons(ETH_P_8021AD)) || + skb->len < VLAN_ETH_HLEN)) + return 0; + + err = __skb_vlan_pop(skb, &vlan_tci); + if (err) + return err; + } + /* move next vlan tag to hw accel tag */ + if (likely((skb->protocol != htons(ETH_P_8021Q) && + skb->protocol != htons(ETH_P_8021AD)) || + skb->len < VLAN_ETH_HLEN)) + return 0; + + vlan_proto = skb->protocol; + err = __skb_vlan_pop(skb, &vlan_tci); + if (unlikely(err)) + return err; + + __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); + return 0; +} +EXPORT_SYMBOL(skb_vlan_pop); + +int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) +{ + if (vlan_tx_tag_present(skb)) { + unsigned int offset = skb->data - skb_mac_header(skb); + int err; + + /* __vlan_insert_tag expect skb->data pointing to mac header. + * So change skb->data before calling it and change back to + * original position later + */ + __skb_push(skb, offset); + err = __vlan_insert_tag(skb, skb->vlan_proto, + vlan_tx_tag_get(skb)); + if (err) + return err; + skb->protocol = skb->vlan_proto; + skb->mac_len += VLAN_HLEN; + __skb_pull(skb, offset); + + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->csum = csum_add(skb->csum, csum_partial(skb->data + + (2 * ETH_ALEN), VLAN_HLEN, 0)); + } + __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); + return 0; +} +EXPORT_SYMBOL(skb_vlan_push); + /** * alloc_skb_with_frags - allocate skb with page frags * diff --git a/net/core/sock.c b/net/core/sock.c index 15e0c67b1069..0725cf0cb685 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1213,6 +1213,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname, v.val = sk->sk_max_pacing_rate; break; + case SO_INCOMING_CPU: + v.val = sk->sk_incoming_cpu; + break; + default: return -ENOPROTOOPT; } @@ -1517,6 +1521,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) newsk->sk_err = 0; newsk->sk_priority = 0; + newsk->sk_incoming_cpu = raw_smp_processor_id(); /* * Before updating sk_refcnt, we must commit prior changes to memory * (Documentation/RCU/rculist_nulls.txt for details) @@ -2457,7 +2462,7 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, msg->msg_flags |= MSG_TRUNC; copied = len; } - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto out_free_skb; diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index cf9cd13509a7..31baba2a71ce 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -26,6 +26,8 @@ static int zero = 0; static 
int one = 1; static int ushort_max = USHRT_MAX; +static int net_msg_warn; /* Unused, but still a sysctl */ + #ifdef CONFIG_RPS static int rps_sock_flow_sysctl(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) @@ -215,6 +217,18 @@ static int set_default_qdisc(struct ctl_table *table, int write, } #endif +static int proc_do_rss_key(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct ctl_table fake_table; + char buf[NETDEV_RSS_KEY_LEN * 3]; + + snprintf(buf, sizeof(buf), "%*phC", NETDEV_RSS_KEY_LEN, netdev_rss_key); + fake_table.data = buf; + fake_table.maxlen = sizeof(buf); + return proc_dostring(&fake_table, write, buffer, lenp, ppos); +} + static struct ctl_table net_core_table[] = { #ifdef CONFIG_NET { @@ -263,6 +277,13 @@ static struct ctl_table net_core_table[] = { .mode = 0644, .proc_handler = proc_dointvec }, + { + .procname = "netdev_rss_key", + .data = &netdev_rss_key, + .maxlen = sizeof(int), + .mode = 0444, + .proc_handler = proc_do_rss_key, + }, #ifdef CONFIG_BPF_JIT { .procname = "bpf_jit_enable", diff --git a/net/core/tso.c b/net/core/tso.c index 8c3203c585b0..630b30b4fb53 100644 --- a/net/core/tso.c +++ b/net/core/tso.c @@ -1,6 +1,7 @@ #include <linux/export.h> #include <net/ip.h> #include <net/tso.h> +#include <asm/unaligned.h> /* Calculate expected number of TX descriptors */ int tso_count_descs(struct sk_buff *skb) @@ -23,7 +24,7 @@ void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso, iph->id = htons(tso->ip_id); iph->tot_len = htons(size + hdr_len - mac_hdr_len); tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb)); - tcph->seq = htonl(tso->tcp_seq); + put_unaligned_be32(tso->tcp_seq, &tcph->seq); tso->ip_id++; if (!is_last) { diff --git a/net/core/utils.c b/net/core/utils.c index efc76dd9dcd1..7b803884c162 100644 --- a/net/core/utils.c +++ b/net/core/utils.c @@ -33,9 +33,6 @@ #include <asm/byteorder.h> #include <asm/uaccess.h> -int net_msg_warn __read_mostly = 1; -EXPORT_SYMBOL(net_msg_warn); - DEFINE_RATELIMIT_STATE(net_ratelimit_state, 5 * HZ, 10); /* * All net warning printk()s should be guarded by this function. diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c index ba07824af4c0..bd9e718c2a20 100644 --- a/net/dccp/ackvec.c +++ b/net/dccp/ackvec.c @@ -218,7 +218,7 @@ static void dccp_ackvec_add_new(struct dccp_ackvec *av, u32 num_packets, * different underlying data structure. */ for (num_packets = num_cells = 1; lost_packets; ++num_cells) { - u8 len = min(lost_packets, (u32)DCCPAV_MAX_RUNLEN); + u8 len = min_t(u32, lost_packets, DCCPAV_MAX_RUNLEN); av->av_buf_head = __ackvec_idx_sub(av->av_buf_head, 1); av->av_buf[av->av_buf_head] = DCCPAV_NOT_RECEIVED | len; diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index c67816647cce..e4c144fa706f 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -22,8 +22,8 @@ /* * DCCP - specific warning and debugging macros. */ -#define DCCP_WARN(fmt, a...) LIMIT_NETDEBUG(KERN_WARNING "%s: " fmt, \ - __func__, ##a) +#define DCCP_WARN(fmt, ...) \ + net_warn_ratelimited("%s: " fmt, __func__, ##__VA_ARGS__) #define DCCP_CRIT(fmt, a...) printk(KERN_CRIT fmt " at %s:%d/%s()\n", ##a, \ __FILE__, __LINE__, __func__) #define DCCP_BUG(a...) 
do { DCCP_CRIT("BUG: " a); dump_stack(); } while(0) diff --git a/net/dccp/feat.c b/net/dccp/feat.c index 9733ddbc96cb..1704948e6a12 100644 --- a/net/dccp/feat.c +++ b/net/dccp/feat.c @@ -478,7 +478,7 @@ static struct dccp_feat_entry * * @fn_list: feature-negotiation list to update * @feat: one of %dccp_feature_numbers * @local: whether local (1) or remote (0) @feat_num is meant - * @needs_mandatory: whether to use Mandatory feature negotiation options + * @mandatory: whether to use Mandatory feature negotiation options * @fval: pointer to NN/SP value to be inserted (will be copied) */ static int dccp_feat_push_change(struct list_head *fn_list, u8 feat, u8 local, @@ -1050,7 +1050,7 @@ static u8 dccp_feat_prefer(u8 preferred_value, u8 *array, u8 array_len) /** * dccp_feat_reconcile - Reconcile SP preference lists - * @fval: SP list to reconcile into + * @fv: SP list to reconcile into * @arr: received SP preference list * @len: length of @arr in bytes * @is_server: whether this side is the server (and @fv is the server's list) diff --git a/net/dccp/input.c b/net/dccp/input.c index 3c8ec7d4a34e..3bd14e885396 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c @@ -537,7 +537,7 @@ static int dccp_rcv_respond_partopen_state_process(struct sock *sk, case DCCP_PKT_DATAACK: case DCCP_PKT_ACK: /* - * FIXME: we should be reseting the PARTOPEN (DELACK) timer + * FIXME: we should be resetting the PARTOPEN (DELACK) timer * here but only if we haven't used the DELACK timer for * something else, like sending a delayed ack for a TIMESTAMP * echo, etc, for now were not clearing it, sending an extra diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 6ca645c4b48e..e45b968613a4 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -140,7 +140,6 @@ failure: inet->inet_dport = 0; goto out; } - EXPORT_SYMBOL_GPL(dccp_v4_connect); /* @@ -376,7 +375,6 @@ void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb) inet->inet_saddr, inet->inet_daddr); } - EXPORT_SYMBOL_GPL(dccp_v4_send_check); static inline u64 dccp_v4_init_sequence(const struct sk_buff *skb) @@ -444,7 +442,6 @@ put_and_exit: dccp_done(newsk); goto exit; } - EXPORT_SYMBOL_GPL(dccp_v4_request_recv_sock); static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) @@ -670,7 +667,6 @@ drop: DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS); return -1; } - EXPORT_SYMBOL_GPL(dccp_v4_conn_request); int dccp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) @@ -729,7 +725,6 @@ discard: kfree_skb(skb); return 0; } - EXPORT_SYMBOL_GPL(dccp_v4_do_rcv); /** @@ -802,7 +797,6 @@ int dccp_invalid_packet(struct sk_buff *skb) return 0; } - EXPORT_SYMBOL_GPL(dccp_invalid_packet); /* this is called when real data arrives */ diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 5ab6627cf370..8e6ae9422a7b 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -896,7 +896,7 @@ verify_sock_status: else if (len < skb->len) msg->msg_flags |= MSG_TRUNC; - if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) { + if (skb_copy_datagram_msg(skb, 0, msg, len)) { /* Exception. Bailout! 
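The dccp.h hunk above moves DCCP_WARN() from the old LIMIT_NETDEBUG mechanism to net_warn_ratelimited(), switching the parameter list from the GNU `a...` form to standard `...` with `##__VA_ARGS__`. The `##` paste is what keeps an argument-free call legal, since it swallows the trailing comma; a standalone illustration (my_warn is an invented name):

    #include <stdio.h>

    /* ## deletes the preceding comma when no variadic arguments are
     * passed, the same trick the new DCCP_WARN() definition relies on */
    #define my_warn(fmt, ...) \
            fprintf(stderr, "%s: " fmt, __func__, ##__VA_ARGS__)

    int main(void)
    {
            my_warn("plain message\n");      /* no extra args: still compiles */
            my_warn("value is %d\n", 42);
            return 0;
    }

Without the `##`, the argument-free call would expand with a dangling comma and fail to compile.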
*/ len = -EFAULT; break; diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c index c8121ceddb9e..7ca7c3143da3 100644 --- a/net/decnet/dn_neigh.c +++ b/net/decnet/dn_neigh.c @@ -591,7 +591,7 @@ static const struct file_operations dn_neigh_seq_fops = { void __init dn_neigh_init(void) { - neigh_table_init(&dn_neigh_table); + neigh_table_init(NEIGH_DN_TABLE, &dn_neigh_table); proc_create("decnet_neigh", S_IRUGO, init_net.proc_net, &dn_neigh_seq_fops); } @@ -599,5 +599,5 @@ void __init dn_neigh_init(void) void __exit dn_neigh_cleanup(void) { remove_proc_entry("decnet_neigh", init_net.proc_net); - neigh_table_clear(&dn_neigh_table); + neigh_table_clear(NEIGH_DN_TABLE, &dn_neigh_table); } diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig index a585fd6352eb..5f8ac404535b 100644 --- a/net/dsa/Kconfig +++ b/net/dsa/Kconfig @@ -11,6 +11,17 @@ config NET_DSA if NET_DSA +config NET_DSA_HWMON + bool "Distributed Switch Architecture HWMON support" + default y + depends on HWMON && !(NET_DSA=y && HWMON=m) + ---help--- + Say Y if you want to expose thermal sensor data on switches supported + by the Distributed Switch Architecture. + + Some of those switches contain thermal sensors. This data is available + via the hwmon sysfs interface and exposes the onboard sensors. + # tagging formats config NET_DSA_TAG_BRCM bool diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 22f34cf4cb27..322c778487e7 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -9,6 +9,9 @@ * (at your option) any later version. */ +#include <linux/ctype.h> +#include <linux/device.h> +#include <linux/hwmon.h> #include <linux/list.h> #include <linux/platform_device.h> #include <linux/slab.h> @@ -17,6 +20,7 @@ #include <linux/of.h> #include <linux/of_mdio.h> #include <linux/of_platform.h> +#include <linux/sysfs.h> #include "dsa_priv.h" char dsa_driver_version[] = "0.1"; @@ -71,6 +75,104 @@ dsa_switch_probe(struct device *host_dev, int sw_addr, char **_name) return ret; } +/* hwmon support ************************************************************/ + +#ifdef CONFIG_NET_DSA_HWMON + +static ssize_t temp1_input_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dsa_switch *ds = dev_get_drvdata(dev); + int temp, ret; + + ret = ds->drv->get_temp(ds, &temp); + if (ret < 0) + return ret; + + return sprintf(buf, "%d\n", temp * 1000); +} +static DEVICE_ATTR_RO(temp1_input); + +static ssize_t temp1_max_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dsa_switch *ds = dev_get_drvdata(dev); + int temp, ret; + + ret = ds->drv->get_temp_limit(ds, &temp); + if (ret < 0) + return ret; + + return sprintf(buf, "%d\n", temp * 1000); +} + +static ssize_t temp1_max_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct dsa_switch *ds = dev_get_drvdata(dev); + int temp, ret; + + ret = kstrtoint(buf, 0, &temp); + if (ret < 0) + return ret; + + ret = ds->drv->set_temp_limit(ds, DIV_ROUND_CLOSEST(temp, 1000)); + if (ret < 0) + return ret; + + return count; +} +static DEVICE_ATTR(temp1_max, S_IRUGO, temp1_max_show, temp1_max_store); + +static ssize_t temp1_max_alarm_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dsa_switch *ds = dev_get_drvdata(dev); + bool alarm; + int ret; + + ret = ds->drv->get_temp_alarm(ds, &alarm); + if (ret < 0) + return ret; + + return sprintf(buf, "%d\n", alarm); +} +static DEVICE_ATTR_RO(temp1_max_alarm); + +static struct attribute *dsa_hwmon_attrs[] = { + &dev_attr_temp1_input.attr, /* 0 */ + 
&dev_attr_temp1_max.attr, /* 1 */ + &dev_attr_temp1_max_alarm.attr, /* 2 */ + NULL +}; + +static umode_t dsa_hwmon_attrs_visible(struct kobject *kobj, + struct attribute *attr, int index) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct dsa_switch *ds = dev_get_drvdata(dev); + struct dsa_switch_driver *drv = ds->drv; + umode_t mode = attr->mode; + + if (index == 1) { + if (!drv->get_temp_limit) + mode = 0; + else if (drv->set_temp_limit) + mode |= S_IWUSR; + } else if (index == 2 && !drv->get_temp_alarm) { + mode = 0; + } + return mode; +} + +static const struct attribute_group dsa_hwmon_group = { + .attrs = dsa_hwmon_attrs, + .is_visible = dsa_hwmon_attrs_visible, +}; +__ATTRIBUTE_GROUPS(dsa_hwmon); + +#endif /* CONFIG_NET_DSA_HWMON */ /* basic switch operations **************************************************/ static struct dsa_switch * @@ -90,12 +192,12 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, */ drv = dsa_switch_probe(host_dev, pd->sw_addr, &name); if (drv == NULL) { - printk(KERN_ERR "%s[%d]: could not detect attached switch\n", - dst->master_netdev->name, index); + netdev_err(dst->master_netdev, "[%d]: could not detect attached switch\n", + index); return ERR_PTR(-EINVAL); } - printk(KERN_INFO "%s[%d]: detected a %s switch\n", - dst->master_netdev->name, index, name); + netdev_info(dst->master_netdev, "[%d]: detected a %s switch\n", + index, name); /* @@ -123,7 +225,8 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, if (!strcmp(name, "cpu")) { if (dst->cpu_switch != -1) { - printk(KERN_ERR "multiple cpu ports?!\n"); + netdev_err(dst->master_netdev, + "multiple cpu ports?!\n"); ret = -EINVAL; goto out; } @@ -174,8 +277,11 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, dst->rcv = brcm_netdev_ops.rcv; break; #endif - default: + case DSA_TAG_PROTO_NONE: break; + default: + ret = -ENOPROTOOPT; + goto out; } dst->tag_protocol = drv->tag_protocol; @@ -215,16 +321,39 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index, slave_dev = dsa_slave_create(ds, parent, i, pd->port_names[i]); if (slave_dev == NULL) { - printk(KERN_ERR "%s[%d]: can't create dsa " - "slave device for port %d(%s)\n", - dst->master_netdev->name, - index, i, pd->port_names[i]); + netdev_err(dst->master_netdev, "[%d]: can't create dsa slave device for port %d(%s)\n", + index, i, pd->port_names[i]); continue; } ds->ports[i] = slave_dev; } +#ifdef CONFIG_NET_DSA_HWMON + /* If the switch provides a temperature sensor, + * register with hardware monitoring subsystem. + * Treat registration error as non-fatal and ignore it. 
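The registration block just below builds the hwmon device name from the master netdev's name, filtering it through isalnum() first, since hwmon 'name' attributes are expected to avoid characters such as '-' (treat that exact character rule as an assumption; the code simply keeps alphanumerics). The filter on a plain string:

    #include <ctype.h>
    #include <stdio.h>

    #define IFNAMSIZ 16

    /* keep only [A-Za-z0-9], as the DSA hwmon setup does */
    static void hwmon_sanitize(const char *netname, char hname[IFNAMSIZ + 1])
    {
            int i, j;

            for (i = j = 0; i < IFNAMSIZ && netname[i]; i++)
                    if (isalnum((unsigned char)netname[i]))
                            hname[j++] = netname[i];
            hname[j] = '\0';
    }

    int main(void)
    {
            char out[IFNAMSIZ + 1];

            hwmon_sanitize("eth-0.100", out);
            printf("%s\n", out);    /* prints "eth0100" */
            return 0;
    }

The sanitized name is then suffixed with "_dsa%d" so multiple switches behind one master device stay distinguishable.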
+ */ + if (drv->get_temp) { + const char *netname = netdev_name(dst->master_netdev); + char hname[IFNAMSIZ + 1]; + int i, j; + + /* Create valid hwmon 'name' attribute */ + for (i = j = 0; i < IFNAMSIZ && netname[i]; i++) { + if (isalnum(netname[i])) + hname[j++] = netname[i]; + } + hname[j] = '\0'; + scnprintf(ds->hwmon_name, sizeof(ds->hwmon_name), "%s_dsa%d", + hname, index); + ds->hwmon_dev = hwmon_device_register_with_groups(NULL, + ds->hwmon_name, ds, dsa_hwmon_groups); + if (IS_ERR(ds->hwmon_dev)) + ds->hwmon_dev = NULL; + } +#endif /* CONFIG_NET_DSA_HWMON */ + return ds; out_free: @@ -236,6 +365,10 @@ out: static void dsa_switch_destroy(struct dsa_switch *ds) { +#ifdef CONFIG_NET_DSA_HWMON + if (ds->hwmon_dev) + hwmon_device_unregister(ds->hwmon_dev); +#endif } #ifdef CONFIG_PM_SLEEP @@ -393,7 +526,8 @@ static int dsa_of_setup_routing_table(struct dsa_platform_data *pd, /* First time routing table allocation */ if (!cd->rtable) { - cd->rtable = kmalloc(pd->nr_chips * sizeof(s8), GFP_KERNEL); + cd->rtable = kmalloc_array(pd->nr_chips, sizeof(s8), + GFP_KERNEL); if (!cd->rtable) return -ENOMEM; @@ -444,6 +578,7 @@ static int dsa_of_probe(struct platform_device *pdev) const char *port_name; int chip_index, port_index; const unsigned int *sw_addr, *port_reg; + u32 eeprom_len; int ret; mdio = of_parse_phandle(np, "dsa,mii-bus", 0); @@ -472,8 +607,8 @@ static int dsa_of_probe(struct platform_device *pdev) if (pd->nr_chips > DSA_MAX_SWITCHES) pd->nr_chips = DSA_MAX_SWITCHES; - pd->chip = kzalloc(pd->nr_chips * sizeof(struct dsa_chip_data), - GFP_KERNEL); + pd->chip = kcalloc(pd->nr_chips, sizeof(struct dsa_chip_data), + GFP_KERNEL); if (!pd->chip) { ret = -ENOMEM; goto out_free; @@ -495,6 +630,9 @@ static int dsa_of_probe(struct platform_device *pdev) if (cd->sw_addr > PHY_MAX_ADDR) continue; + if (!of_property_read_u32(np, "eeprom-length", &eeprom_len)) + cd->eeprom_len = eeprom_len; + for_each_available_child_of_node(child, port) { port_reg = of_get_property(port, "reg", NULL); if (!port_reg) @@ -563,15 +701,13 @@ static inline void dsa_of_remove(struct platform_device *pdev) static int dsa_probe(struct platform_device *pdev) { - static int dsa_version_printed; struct dsa_platform_data *pd = pdev->dev.platform_data; struct net_device *dev; struct dsa_switch_tree *dst; int i, ret; - if (!dsa_version_printed++) - printk(KERN_NOTICE "Distributed Switch Architecture " - "driver version %s\n", dsa_driver_version); + pr_notice_once("Distributed Switch Architecture driver version %s\n", + dsa_driver_version); if (pdev->dev.of_node) { ret = dsa_of_probe(pdev); @@ -615,9 +751,8 @@ static int dsa_probe(struct platform_device *pdev) ds = dsa_switch_setup(dst, i, &pdev->dev, pd->chip[i].host_dev); if (IS_ERR(ds)) { - printk(KERN_ERR "%s[%d]: couldn't create dsa switch " - "instance (error %ld)\n", dev->name, i, - PTR_ERR(ds)); + netdev_err(dev, "[%d]: couldn't create dsa switch instance (error %ld)\n", + i, PTR_ERR(ds)); continue; } diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 6d1817449c36..528380a3e296 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -249,6 +249,27 @@ static void dsa_slave_get_drvinfo(struct net_device *dev, strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info)); } +static int dsa_slave_get_regs_len(struct net_device *dev) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + + if (ds->drv->get_regs_len) + return ds->drv->get_regs_len(ds, p->port); + + return -EOPNOTSUPP; +} + +static void +dsa_slave_get_regs(struct 
net_device *dev, struct ethtool_regs *regs, void *_p) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + + if (ds->drv->get_regs) + ds->drv->get_regs(ds, p->port, regs, _p); +} + static int dsa_slave_nway_reset(struct net_device *dev) { struct dsa_slave_priv *p = netdev_priv(dev); @@ -271,6 +292,44 @@ static u32 dsa_slave_get_link(struct net_device *dev) return -EOPNOTSUPP; } +static int dsa_slave_get_eeprom_len(struct net_device *dev) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + + if (ds->pd->eeprom_len) + return ds->pd->eeprom_len; + + if (ds->drv->get_eeprom_len) + return ds->drv->get_eeprom_len(ds); + + return 0; +} + +static int dsa_slave_get_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + + if (ds->drv->get_eeprom) + return ds->drv->get_eeprom(ds, eeprom, data); + + return -EOPNOTSUPP; +} + +static int dsa_slave_set_eeprom(struct net_device *dev, + struct ethtool_eeprom *eeprom, u8 *data) +{ + struct dsa_slave_priv *p = netdev_priv(dev); + struct dsa_switch *ds = p->parent; + + if (ds->drv->set_eeprom) + return ds->drv->set_eeprom(ds, eeprom, data); + + return -EOPNOTSUPP; +} + static void dsa_slave_get_strings(struct net_device *dev, uint32_t stringset, uint8_t *data) { @@ -385,8 +444,13 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = { .get_settings = dsa_slave_get_settings, .set_settings = dsa_slave_set_settings, .get_drvinfo = dsa_slave_get_drvinfo, + .get_regs_len = dsa_slave_get_regs_len, + .get_regs = dsa_slave_get_regs, .nway_reset = dsa_slave_nway_reset, .get_link = dsa_slave_get_link, + .get_eeprom_len = dsa_slave_get_eeprom_len, + .get_eeprom = dsa_slave_get_eeprom, + .set_eeprom = dsa_slave_set_eeprom, .get_strings = dsa_slave_get_strings, .get_ethtool_stats = dsa_slave_get_ethtool_stats, .get_sset_count = dsa_slave_get_sset_count, @@ -468,7 +532,7 @@ static void dsa_slave_phy_setup(struct dsa_slave_priv *p, */ ret = of_phy_register_fixed_link(port_dn); if (ret) { - pr_err("failed to register fixed PHY\n"); + netdev_err(slave_dev, "failed to register fixed PHY\n"); return; } phy_is_fixed = true; @@ -489,11 +553,14 @@ static void dsa_slave_phy_setup(struct dsa_slave_priv *p, /* We could not connect to a designated PHY, so use the switch internal * MDIO bus instead */ - if (!p->phy) + if (!p->phy) { p->phy = ds->slave_mii_bus->phy_map[p->port]; - else - pr_info("attached PHY at address %d [%s]\n", - p->phy->addr, p->phy->drv->name); + phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, + p->phy_interface); + } else { + netdev_info(slave_dev, "attached PHY at address %d [%s]\n", + p->phy->addr, p->phy->drv->name); + } } int dsa_slave_suspend(struct net_device *slave_dev) @@ -590,8 +657,8 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent, ret = register_netdev(slave_dev); if (ret) { - printk(KERN_ERR "%s: error %d registering interface %s\n", - master->name, ret, slave_dev->name); + netdev_err(master, "error %d registering interface %s\n", + ret, slave_dev->name); free_netdev(slave_dev); return NULL; } diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c index ce90c8bdc658..2dab27063273 100644 --- a/net/dsa/tag_dsa.c +++ b/net/dsa/tag_dsa.c @@ -63,8 +63,6 @@ static netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev) dsa_header[3] = 0x00; } - skb->protocol = htons(ETH_P_DSA); - skb->dev = p->parent->dst->master_netdev; 
dev_queue_xmit(skb); diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c index 94fcce778679..9aeda596f7ec 100644 --- a/net/dsa/tag_edsa.c +++ b/net/dsa/tag_edsa.c @@ -76,8 +76,6 @@ static netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev) edsa_header[7] = 0x00; } - skb->protocol = htons(ETH_P_EDSA); - skb->dev = p->parent->dst->master_netdev; dev_queue_xmit(skb); diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c index 115fdca34077..e268f9db8893 100644 --- a/net/dsa/tag_trailer.c +++ b/net/dsa/tag_trailer.c @@ -57,8 +57,6 @@ static netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev) trailer[2] = 0x10; trailer[3] = 0x00; - nskb->protocol = htons(ETH_P_TRAILER); - nskb->dev = p->parent->dst->master_netdev; dev_queue_xmit(nskb); diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c index 3d58befef467..b8555ec71387 100644 --- a/net/ieee802154/dgram.c +++ b/net/ieee802154/dgram.c @@ -320,7 +320,7 @@ static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk, } /* FIXME: skip headers if necessary ?! */ - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto done; diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c index 3ffcf4a9de01..21c38945ab8b 100644 --- a/net/ieee802154/raw.c +++ b/net/ieee802154/raw.c @@ -191,7 +191,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, copied = len; } - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto done; diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index e682b48e0709..bd2901604842 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -322,6 +322,15 @@ config NET_FOU network mechanisms and optimizations for UDP (such as ECMP and RSS) can be leveraged to provide better service. +config NET_FOU_IP_TUNNELS + bool "IP: FOU encapsulation of IP tunnels" + depends on NET_IPIP || NET_IPGRE || IPV6_SIT + select NET_FOU + ---help--- + Allow configuration of FOU or GUE encapsulation for IP tunnels. + When this option is enabled IP tunnels can be configured to use + FOU or GUE encapsulation. 
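The fou.c additions further below implement the option described in this Kconfig help: fou_encap_hlen() charges a tunnel only the UDP header, while gue_encap_hlen() adds the 4-byte GUE base header plus optional words. The overhead arithmetic, pulled out into a sketch (the constants mirror sizeof(struct udphdr)/sizeof(struct guehdr) and the kernel's GUE_LEN_PRIV/GUE_PLEN_REMCSUM; treat the exact values as assumptions):

    #include <stdbool.h>
    #include <stdio.h>

    #define UDP_HLEN         8      /* sizeof(struct udphdr) */
    #define GUE_HLEN         4      /* sizeof(struct guehdr) */
    #define GUE_LEN_PRIV     4      /* private flags word */
    #define GUE_PLEN_REMCSUM 4      /* csum start + offset, two 16-bit fields */

    /* mirrors gue_encap_hlen(): options only appear when a feature
     * such as remote checksum offload asks for them */
    static unsigned int gue_hlen(bool remcsum)
    {
            unsigned int len = UDP_HLEN + GUE_HLEN;
            bool need_priv = false;

            if (remcsum) {
                    len += GUE_PLEN_REMCSUM;
                    need_priv = true;
            }
            return len + (need_priv ? GUE_LEN_PRIV : 0);
    }

    int main(void)
    {
            printf("FOU %u, GUE %u, GUE+remcsum %u\n",
                   (unsigned)UDP_HLEN, gue_hlen(false), gue_hlen(true));
            /* FOU 8, GUE 12, GUE+remcsum 20 */
            return 0;
    }

Tunnel drivers use these lengths to size headroom before fou_build_header()/gue_build_header() push the actual bytes.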
+ config GENEVE tristate "Generic Network Virtualization Encapsulation (Geneve)" depends on INET diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 92db7a69f2b9..3a096bb2d596 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1222,7 +1222,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, SKB_GSO_TCPV6 | SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM | - SKB_GSO_MPLS | + SKB_GSO_TUNNEL_REMCSUM | 0))) goto out; @@ -1246,7 +1246,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, encap = SKB_GSO_CB(skb)->encap_level > 0; if (encap) - features = skb->dev->hw_enc_features & netif_skb_features(skb); + features &= skb->dev->hw_enc_features; SKB_GSO_CB(skb)->encap_level += ihl; skb_reset_transport_header(skb); diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 16acb59d665e..205e1472aa78 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -1292,7 +1292,7 @@ static int arp_proc_init(void); void __init arp_init(void) { - neigh_table_init(&arp_tbl); + neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl); dev_add_pack(&arp_packet_type); arp_proc_init(); diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 4715f25dfe03..5160c710f2eb 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -50,7 +50,7 @@ #include <net/netlabel.h> #include <net/cipso_ipv4.h> #include <linux/atomic.h> -#include <asm/bug.h> +#include <linux/bug.h> #include <asm/unaligned.h> /* List of available DOI definitions */ @@ -72,6 +72,7 @@ struct cipso_v4_map_cache_bkt { u32 size; struct list_head list; }; + struct cipso_v4_map_cache_entry { u32 hash; unsigned char *key; @@ -82,7 +83,8 @@ struct cipso_v4_map_cache_entry { u32 activity; struct list_head list; }; -static struct cipso_v4_map_cache_bkt *cipso_v4_cache = NULL; + +static struct cipso_v4_map_cache_bkt *cipso_v4_cache; /* Restricted bitmap (tag #1) flags */ int cipso_v4_rbm_optfmt = 0; @@ -539,7 +541,7 @@ doi_add_return: /** * cipso_v4_doi_free - Frees a DOI definition - * @entry: the entry's RCU field + * @doi_def: the DOI definition * * Description: * This function frees all of the memory associated with a DOI definition. 
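A unifying thread in the neighbour.c, dn_neigh.c and arp.c hunks above is that table lookup by address family becomes a constant-time array index instead of a locked list walk. The shape of the new lookup, reduced to a userspace sketch (enum slot names as in the patch; error handling elided):

    #include <stdio.h>
    #include <sys/socket.h>

    enum { NEIGH_ARP_TABLE, NEIGH_ND_TABLE, NEIGH_DN_TABLE, NEIGH_NR_TABLES };

    struct neigh_table { const char *id; };

    static struct neigh_table *neigh_tables[NEIGH_NR_TABLES];

    /* mirrors neigh_find_table(): no neigh_tbl_lock, no list traversal */
    static struct neigh_table *find_table(int family)
    {
            switch (family) {
            case AF_INET:   return neigh_tables[NEIGH_ARP_TABLE];
            case AF_INET6:  return neigh_tables[NEIGH_ND_TABLE];
            case AF_DECnet: return neigh_tables[NEIGH_DN_TABLE];
            }
            return NULL;    /* callers map this to -EAFNOSUPPORT */
    }

    int main(void)
    {
            static struct neigh_table arp = { "arp_cache" };

            /* as neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl) now does */
            neigh_tables[NEIGH_ARP_TABLE] = &arp;
            printf("%s\n", find_table(AF_INET)->id);
            return 0;
    }

Since each family registers at most one table, the old duplicate-registration check and its dump_stack() become unnecessary.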
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 360b565918c4..60173d4d3a0e 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -392,8 +392,10 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb) if (elen <= 0) goto out; - if ((err = skb_cow_data(skb, 0, &trailer)) < 0) + err = skb_cow_data(skb, 0, &trailer); + if (err < 0) goto out; + nfrags = err; assoclen = sizeof(*esph); @@ -601,12 +603,12 @@ static int esp_init_authenc(struct xfrm_state *x) BUG_ON(!aalg_desc); err = -EINVAL; - if (aalg_desc->uinfo.auth.icv_fullbits/8 != + if (aalg_desc->uinfo.auth.icv_fullbits / 8 != crypto_aead_authsize(aead)) { - NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n", - x->aalg->alg_name, - crypto_aead_authsize(aead), - aalg_desc->uinfo.auth.icv_fullbits/8); + pr_info("ESP: %s digestsize %u != %hu\n", + x->aalg->alg_name, + crypto_aead_authsize(aead), + aalg_desc->uinfo.auth.icv_fullbits / 8); goto free_key; } diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 32e78924e246..3dfe9828e7ef 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -38,21 +38,17 @@ static inline struct fou *fou_from_sock(struct sock *sk) return sk->sk_user_data; } -static int fou_udp_encap_recv_deliver(struct sk_buff *skb, - u8 protocol, size_t len) +static void fou_recv_pull(struct sk_buff *skb, size_t len) { struct iphdr *iph = ip_hdr(skb); /* Remove 'len' bytes from the packet (UDP header and - * FOU header if present), modify the protocol to the one - * we found, and then call rcv_encap. + * FOU header if present). */ iph->tot_len = htons(ntohs(iph->tot_len) - len); __skb_pull(skb, len); skb_postpull_rcsum(skb, udp_hdr(skb), len); skb_reset_transport_header(skb); - - return -protocol; } static int fou_udp_recv(struct sock *sk, struct sk_buff *skb) @@ -62,16 +58,78 @@ static int fou_udp_recv(struct sock *sk, struct sk_buff *skb) if (!fou) return 1; - return fou_udp_encap_recv_deliver(skb, fou->protocol, - sizeof(struct udphdr)); + fou_recv_pull(skb, sizeof(struct udphdr)); + + return -fou->protocol; +} + +static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr, + void *data, int hdrlen, u8 ipproto) +{ + __be16 *pd = data; + u16 start = ntohs(pd[0]); + u16 offset = ntohs(pd[1]); + u16 poffset = 0; + u16 plen; + __wsum csum, delta; + __sum16 *psum; + + if (skb->remcsum_offload) { + /* Already processed in GRO path */ + skb->remcsum_offload = 0; + return guehdr; + } + + if (start > skb->len - hdrlen || + offset > skb->len - hdrlen - sizeof(u16)) + return NULL; + + if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) + __skb_checksum_complete(skb); + + plen = hdrlen + offset + sizeof(u16); + if (!pskb_may_pull(skb, plen)) + return NULL; + guehdr = (struct guehdr *)&udp_hdr(skb)[1]; + + if (ipproto == IPPROTO_IP && sizeof(struct iphdr) < plen) { + struct iphdr *ip = (struct iphdr *)(skb->data + hdrlen); + + /* If next header happens to be IP we can skip that for the + * checksum calculation since the IP header checksum is zero + * if correct. 
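The derivation in the surrounding gue_remcsum() is plain one's-complement algebra: skb->csum already holds the checksum of the whole pulled packet, so the checksum of everything from 'start' onward is obtained by csum_sub()-ing the bytes before 'start', folding, and writing the result at 'offset'. A self-contained demo of exactly that arithmetic (RFC 1071 style; all offsets and the test data are invented for the sketch):

    #include <stdint.h>
    #include <stdio.h>

    /* 16-bit one's-complement sum with end-around carry */
    static uint16_t csum_range(const uint8_t *p, size_t len)
    {
            uint32_t sum = 0;
            size_t i;

            for (i = 0; i + 1 < len; i += 2)
                    sum += ((uint32_t)p[i] << 8) | p[i + 1];
            if (len & 1)
                    sum += (uint32_t)p[len - 1] << 8;
            while (sum >> 16)
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)sum;
    }

    static uint16_t csum_add(uint16_t a, uint16_t b)
    {
            uint32_t s = (uint32_t)a + b;
            return (uint16_t)((s & 0xffff) + (s >> 16));
    }

    /* one's-complement subtraction: a - b == a + ~b */
    static uint16_t csum_sub(uint16_t a, uint16_t b)
    {
            return csum_add(a, (uint16_t)~b);
    }

    int main(void)
    {
            uint8_t pkt[64];
            const size_t start = 20, offset = 26; /* csum field at 26..27 */
            uint16_t total, head, tail, folded;
            size_t i;

            for (i = 0; i < sizeof(pkt); i++)
                    pkt[i] = (uint8_t)(i * 31 + 7);
            pkt[offset] = pkt[offset + 1] = 0;    /* sender left it zero */

            total = csum_range(pkt, sizeof(pkt)); /* role of skb->csum */
            head  = csum_range(pkt, start);       /* bytes before start */
            tail  = csum_sub(total, head);        /* sum over [start, end) */

            folded = (uint16_t)~tail;             /* csum_fold() */
            pkt[offset]     = folded >> 8;
            pkt[offset + 1] = folded & 0xff;

            /* the patched region now verifies to zero */
            printf("verify: %04x\n",
                   (unsigned)(uint16_t)~csum_range(pkt + start,
                                                   sizeof(pkt) - start));
            return 0;
    }

The 'IP header checksum is zero if correct' shortcut in the code here is the same algebra: a valid IPv4 header folds to zero, so skipping its poffset bytes does not change the derived value.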
+ */ + poffset = ip->ihl * 4; + } + + csum = csum_sub(skb->csum, skb_checksum(skb, poffset + hdrlen, + start - poffset - hdrlen, 0)); + + /* Set derived checksum in packet */ + psum = (__sum16 *)(skb->data + hdrlen + offset); + delta = csum_sub(csum_fold(csum), *psum); + *psum = csum_fold(csum); + + /* Adjust skb->csum since we changed the packet */ + skb->csum = csum_add(skb->csum, delta); + + return guehdr; +} + +static int gue_control_message(struct sk_buff *skb, struct guehdr *guehdr) +{ + /* No support yet */ + kfree_skb(skb); + return 0; } static int gue_udp_recv(struct sock *sk, struct sk_buff *skb) { struct fou *fou = fou_from_sock(sk); - size_t len; + size_t len, optlen, hdrlen; struct guehdr *guehdr; - struct udphdr *uh; + void *data; + u16 doffset = 0; if (!fou) return 1; @@ -80,25 +138,61 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb) if (!pskb_may_pull(skb, len)) goto drop; - uh = udp_hdr(skb); - guehdr = (struct guehdr *)&uh[1]; + guehdr = (struct guehdr *)&udp_hdr(skb)[1]; + + optlen = guehdr->hlen << 2; + len += optlen; - len += guehdr->hlen << 2; if (!pskb_may_pull(skb, len)) goto drop; - uh = udp_hdr(skb); - guehdr = (struct guehdr *)&uh[1]; + /* guehdr may change after pull */ + guehdr = (struct guehdr *)&udp_hdr(skb)[1]; - if (guehdr->version != 0) - goto drop; + hdrlen = sizeof(struct guehdr) + optlen; - if (guehdr->flags) { - /* No support yet */ + if (guehdr->version != 0 || validate_gue_flags(guehdr, optlen)) goto drop; + + hdrlen = sizeof(struct guehdr) + optlen; + + ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(skb)->tot_len) - len); + + /* Pull UDP header now, skb->data points to guehdr */ + __skb_pull(skb, sizeof(struct udphdr)); + + /* Pull csum through the guehdr now . This can be used if + * there is a remote checksum offload. + */ + skb_postpull_rcsum(skb, udp_hdr(skb), len); + + data = &guehdr[1]; + + if (guehdr->flags & GUE_FLAG_PRIV) { + __be32 flags = *(__be32 *)(data + doffset); + + doffset += GUE_LEN_PRIV; + + if (flags & GUE_PFLAG_REMCSUM) { + guehdr = gue_remcsum(skb, guehdr, data + doffset, + hdrlen, guehdr->proto_ctype); + if (!guehdr) + goto drop; + + data = &guehdr[1]; + + doffset += GUE_PLEN_REMCSUM; + } } - return fou_udp_encap_recv_deliver(skb, guehdr->next_hdr, len); + if (unlikely(guehdr->control)) + return gue_control_message(skb, guehdr); + + __skb_pull(skb, hdrlen); + skb_reset_transport_header(skb); + + return -guehdr->proto_ctype; + drop: kfree_skb(skb); return 0; @@ -133,6 +227,8 @@ static int fou_gro_complete(struct sk_buff *skb, int nhoff) int err = -ENOSYS; const struct net_offload **offloads; + udp_tunnel_gro_complete(skb, nhoff); + rcu_read_lock(); offloads = NAPI_GRO_CB(skb)->is_ipv6 ? 
inet6_offloads : inet_offloads; ops = rcu_dereference(offloads[proto]); @@ -147,6 +243,66 @@ out_unlock: return err; } +static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off, + struct guehdr *guehdr, void *data, + size_t hdrlen, u8 ipproto) +{ + __be16 *pd = data; + u16 start = ntohs(pd[0]); + u16 offset = ntohs(pd[1]); + u16 poffset = 0; + u16 plen; + void *ptr; + __wsum csum, delta; + __sum16 *psum; + + if (skb->remcsum_offload) + return guehdr; + + if (start > skb_gro_len(skb) - hdrlen || + offset > skb_gro_len(skb) - hdrlen - sizeof(u16) || + !NAPI_GRO_CB(skb)->csum_valid || skb->remcsum_offload) + return NULL; + + plen = hdrlen + offset + sizeof(u16); + + /* Pull checksum that will be written */ + if (skb_gro_header_hard(skb, off + plen)) { + guehdr = skb_gro_header_slow(skb, off + plen, off); + if (!guehdr) + return NULL; + } + + ptr = (void *)guehdr + hdrlen; + + if (ipproto == IPPROTO_IP && + (hdrlen + sizeof(struct iphdr) < plen)) { + struct iphdr *ip = (struct iphdr *)(ptr + hdrlen); + + /* If next header happens to be IP we can skip + * that for the checksum calculation since the + * IP header checksum is zero if correct. + */ + poffset = ip->ihl * 4; + } + + csum = csum_sub(NAPI_GRO_CB(skb)->csum, + csum_partial(ptr + poffset, start - poffset, 0)); + + /* Set derived checksum in packet */ + psum = (__sum16 *)(ptr + offset); + delta = csum_sub(csum_fold(csum), *psum); + *psum = csum_fold(csum); + + /* Adjust skb->csum since we changed the packet */ + skb->csum = csum_add(skb->csum, delta); + NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta); + + skb->remcsum_offload = 1; + + return guehdr; +} + static struct sk_buff **gue_gro_receive(struct sk_buff **head, struct sk_buff *skb) { @@ -154,38 +310,64 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head, const struct net_offload *ops; struct sk_buff **pp = NULL; struct sk_buff *p; - u8 proto; struct guehdr *guehdr; - unsigned int hlen, guehlen; - unsigned int off; + size_t len, optlen, hdrlen, off; + void *data; + u16 doffset = 0; int flush = 1; off = skb_gro_offset(skb); - hlen = off + sizeof(*guehdr); + len = off + sizeof(*guehdr); + guehdr = skb_gro_header_fast(skb, off); - if (skb_gro_header_hard(skb, hlen)) { - guehdr = skb_gro_header_slow(skb, hlen, off); + if (skb_gro_header_hard(skb, len)) { + guehdr = skb_gro_header_slow(skb, len, off); if (unlikely(!guehdr)) goto out; } - proto = guehdr->next_hdr; + optlen = guehdr->hlen << 2; + len += optlen; - rcu_read_lock(); - offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; - ops = rcu_dereference(offloads[proto]); - if (WARN_ON(!ops || !ops->callbacks.gro_receive)) - goto out_unlock; + if (skb_gro_header_hard(skb, len)) { + guehdr = skb_gro_header_slow(skb, len, off); + if (unlikely(!guehdr)) + goto out; + } - guehlen = sizeof(*guehdr) + (guehdr->hlen << 2); + if (unlikely(guehdr->control) || guehdr->version != 0 || + validate_gue_flags(guehdr, optlen)) + goto out; - hlen = off + guehlen; - if (skb_gro_header_hard(skb, hlen)) { - guehdr = skb_gro_header_slow(skb, hlen, off); - if (unlikely(!guehdr)) - goto out_unlock; + hdrlen = sizeof(*guehdr) + optlen; + + /* Adjust NAPI_GRO_CB(skb)->csum to account for guehdr, + * this is needed if there is a remote checkcsum offload. 
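Both receive paths (gue_udp_recv() above and this GRO path) parse the same 4-byte base header, with hlen counted in 32-bit words, which is why `optlen = guehdr->hlen << 2` appears in each. A byte-level parser for the base header under the layout used here (version/control/hlen packed into the first byte per the GUE draft; the kernel expresses this with endian-dependent bitfields in struct guehdr, and the flag bit in the test data is an assumed value):

    #include <stdint.h>
    #include <stdio.h>

    struct gue_base {
            uint8_t  version;       /* top two bits of byte 0 */
            uint8_t  control;       /* next bit: control vs data message */
            uint8_t  hlen;          /* low five bits, in 32-bit words */
            uint8_t  proto_ctype;
            uint16_t flags;
    };

    /* returns total header length (base + options) or -1 if malformed,
     * mirroring the version/length checks in gue_udp_recv() */
    static int gue_parse(const uint8_t *p, size_t len, struct gue_base *h)
    {
            size_t optlen;

            if (len < 4)
                    return -1;
            h->version     = p[0] >> 6;
            h->control     = (p[0] >> 5) & 1;
            h->hlen        = p[0] & 0x1f;
            h->proto_ctype = p[1];
            h->flags       = ((uint16_t)p[2] << 8) | p[3];

            optlen = (size_t)h->hlen << 2;
            if (h->version != 0 || 4 + optlen > len)
                    return -1;
            return (int)(4 + optlen);
    }

    int main(void)
    {
            /* version 0, data, hlen = 2 words of options, proto 4 (IPIP) */
            uint8_t pkt[12] = { 0x02, 0x04, 0x00, 0x01 };
            struct gue_base h;
            int hdrlen = gue_parse(pkt, sizeof(pkt), &h);

            printf("hdrlen %d proto %u flags %04x\n",
                   hdrlen, h.proto_ctype, h.flags);
            return 0;
    }

The options themselves (here the private-flags word and the remcsum start/offset pair) live in those hlen words, which is why every flag that is set must account for its length, the job validate_gue_flags() performs in the checks above.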
+ */ + skb_gro_postpull_rcsum(skb, guehdr, hdrlen); + + data = &guehdr[1]; + + if (guehdr->flags & GUE_FLAG_PRIV) { + __be32 flags = *(__be32 *)(data + doffset); + + doffset += GUE_LEN_PRIV; + + if (flags & GUE_PFLAG_REMCSUM) { + guehdr = gue_gro_remcsum(skb, off, guehdr, + data + doffset, hdrlen, + guehdr->proto_ctype); + if (!guehdr) + goto out; + + data = &guehdr[1]; + + doffset += GUE_PLEN_REMCSUM; + } } + skb_gro_pull(skb, hdrlen); + flush = 0; for (p = *head; p; p = p->next) { @@ -197,7 +379,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head, guehdr2 = (struct guehdr *)(p->data + off); /* Compare base GUE header to be equal (covers - * hlen, version, next_hdr, and flags. + * hlen, version, proto_ctype, and flags. */ if (guehdr->word != guehdr2->word) { NAPI_GRO_CB(p)->same_flow = 0; @@ -212,10 +394,11 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head, } } - skb_gro_pull(skb, guehlen); - - /* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/ - skb_gro_postpull_rcsum(skb, guehdr, guehlen); + rcu_read_lock(); + offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; + ops = rcu_dereference(offloads[guehdr->proto_ctype]); + if (WARN_ON(!ops || !ops->callbacks.gro_receive)) + goto out_unlock; pp = ops->callbacks.gro_receive(head, skb); @@ -236,7 +419,7 @@ static int gue_gro_complete(struct sk_buff *skb, int nhoff) u8 proto; int err = -ENOENT; - proto = guehdr->next_hdr; + proto = guehdr->proto_ctype; guehlen = sizeof(*guehdr) + (guehdr->hlen << 2); @@ -487,6 +670,200 @@ static const struct genl_ops fou_nl_ops[] = { }, }; +size_t fou_encap_hlen(struct ip_tunnel_encap *e) +{ + return sizeof(struct udphdr); +} +EXPORT_SYMBOL(fou_encap_hlen); + +size_t gue_encap_hlen(struct ip_tunnel_encap *e) +{ + size_t len; + bool need_priv = false; + + len = sizeof(struct udphdr) + sizeof(struct guehdr); + + if (e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) { + len += GUE_PLEN_REMCSUM; + need_priv = true; + } + + len += need_priv ? GUE_LEN_PRIV : 0; + + return len; +} +EXPORT_SYMBOL(gue_encap_hlen); + +static void fou_build_udp(struct sk_buff *skb, struct ip_tunnel_encap *e, + struct flowi4 *fl4, u8 *protocol, __be16 sport) +{ + struct udphdr *uh; + + skb_push(skb, sizeof(struct udphdr)); + skb_reset_transport_header(skb); + + uh = udp_hdr(skb); + + uh->dest = e->dport; + uh->source = sport; + uh->len = htons(skb->len); + uh->check = 0; + udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb, + fl4->saddr, fl4->daddr, skb->len); + + *protocol = IPPROTO_UDP; +} + +int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, + u8 *protocol, struct flowi4 *fl4) +{ + bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM); + int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; + __be16 sport; + + skb = iptunnel_handle_offloads(skb, csum, type); + + if (IS_ERR(skb)) + return PTR_ERR(skb); + + sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev), + skb, 0, 0, false); + fou_build_udp(skb, e, fl4, protocol, sport); + + return 0; +} +EXPORT_SYMBOL(fou_build_header); + +int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, + u8 *protocol, struct flowi4 *fl4) +{ + bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM); + int type = csum ? 
SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; + struct guehdr *guehdr; + size_t hdrlen, optlen = 0; + __be16 sport; + void *data; + bool need_priv = false; + + if ((e->flags & TUNNEL_ENCAP_FLAG_REMCSUM) && + skb->ip_summed == CHECKSUM_PARTIAL) { + csum = false; + optlen += GUE_PLEN_REMCSUM; + type |= SKB_GSO_TUNNEL_REMCSUM; + need_priv = true; + } + + optlen += need_priv ? GUE_LEN_PRIV : 0; + + skb = iptunnel_handle_offloads(skb, csum, type); + + if (IS_ERR(skb)) + return PTR_ERR(skb); + + /* Get source port (based on flow hash) before skb_push */ + sport = e->sport ? : udp_flow_src_port(dev_net(skb->dev), + skb, 0, 0, false); + + hdrlen = sizeof(struct guehdr) + optlen; + + skb_push(skb, hdrlen); + + guehdr = (struct guehdr *)skb->data; + + guehdr->control = 0; + guehdr->version = 0; + guehdr->hlen = optlen >> 2; + guehdr->flags = 0; + guehdr->proto_ctype = *protocol; + + data = &guehdr[1]; + + if (need_priv) { + __be32 *flags = data; + + guehdr->flags |= GUE_FLAG_PRIV; + *flags = 0; + data += GUE_LEN_PRIV; + + if (type & SKB_GSO_TUNNEL_REMCSUM) { + u16 csum_start = skb_checksum_start_offset(skb); + __be16 *pd = data; + + if (csum_start < hdrlen) + return -EINVAL; + + csum_start -= hdrlen; + pd[0] = htons(csum_start); + pd[1] = htons(csum_start + skb->csum_offset); + + if (!skb_is_gso(skb)) { + skb->ip_summed = CHECKSUM_NONE; + skb->encapsulation = 0; + } + + *flags |= GUE_PFLAG_REMCSUM; + data += GUE_PLEN_REMCSUM; + } + + } + + fou_build_udp(skb, e, fl4, protocol, sport); + + return 0; +} +EXPORT_SYMBOL(gue_build_header); + +#ifdef CONFIG_NET_FOU_IP_TUNNELS + +static const struct ip_tunnel_encap_ops __read_mostly fou_iptun_ops = { + .encap_hlen = fou_encap_hlen, + .build_header = fou_build_header, +}; + +static const struct ip_tunnel_encap_ops __read_mostly gue_iptun_ops = { + .encap_hlen = gue_encap_hlen, + .build_header = gue_build_header, +}; + +static int ip_tunnel_encap_add_fou_ops(void) +{ + int ret; + + ret = ip_tunnel_encap_add_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU); + if (ret < 0) { + pr_err("can't add fou ops\n"); + return ret; + } + + ret = ip_tunnel_encap_add_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE); + if (ret < 0) { + pr_err("can't add gue ops\n"); + ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU); + return ret; + } + + return 0; +} + +static void ip_tunnel_encap_del_fou_ops(void) +{ + ip_tunnel_encap_del_ops(&fou_iptun_ops, TUNNEL_ENCAP_FOU); + ip_tunnel_encap_del_ops(&gue_iptun_ops, TUNNEL_ENCAP_GUE); +} + +#else + +static int ip_tunnel_encap_add_fou_ops(void) +{ + return 0; +} + +static void ip_tunnel_encap_del_fou_ops(void) +{ +} + +#endif + static int __init fou_init(void) { int ret; @@ -494,6 +871,14 @@ static int __init fou_init(void) ret = genl_register_family_with_ops(&fou_nl_family, fou_nl_ops); + if (ret < 0) + goto exit; + + ret = ip_tunnel_encap_add_fou_ops(); + if (ret < 0) + genl_unregister_family(&fou_nl_family); + +exit: return ret; } @@ -501,6 +886,8 @@ static void __exit fou_fini(void) { struct fou *fou, *next; + ip_tunnel_encap_del_fou_ops(); + genl_unregister_family(&fou_nl_family); /* Close all the FOU sockets */ diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c index 065cd94c640c..a457232f0131 100644 --- a/net/ipv4/geneve.c +++ b/net/ipv4/geneve.c @@ -104,7 +104,7 @@ static void geneve_build_header(struct genevehdr *geneveh, memcpy(geneveh->options, options, options_len); } -/* Transmit a fully formated Geneve frame. +/* Transmit a fully formatted Geneve frame. * * When calling this function. 
The skb->data should point * to the geneve header which is fully formed. @@ -131,19 +131,15 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt, if (unlikely(err)) return err; - if (vlan_tx_tag_present(skb)) { - if (unlikely(!__vlan_put_tag(skb, - skb->vlan_proto, - vlan_tx_tag_get(skb)))) { - err = -ENOMEM; - return err; - } - skb->vlan_tci = 0; - } + skb = vlan_hwaccel_push_inside(skb); + if (unlikely(!skb)) + return -ENOMEM; gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len); geneve_build_header(gnvh, tun_flags, vni, opt_len, opt); + skb_set_inner_protocol(skb, htons(ETH_P_TEB)); + return udp_tunnel_xmit_skb(gs->sock, rt, skb, src, dst, tos, ttl, df, src_port, dst_port, xnet); } @@ -364,6 +360,7 @@ late_initcall(geneve_init_module); static void __exit geneve_cleanup_module(void) { destroy_workqueue(geneve_wq); + unregister_pernet_subsys(&geneve_net_ops); } module_exit(geneve_cleanup_module); diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index ccda09628de7..bb5947b0ce2d 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -47,7 +47,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, greh = (struct gre_base_hdr *)skb_transport_header(skb); - ghl = skb_inner_network_header(skb) - skb_transport_header(skb); + ghl = skb_inner_mac_header(skb) - skb_transport_header(skb); if (unlikely(ghl < sizeof(*greh))) goto out; @@ -68,7 +68,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, skb->mac_len = skb_inner_network_offset(skb); /* segment inner packet. */ - enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); + enc_features = skb->dev->hw_enc_features & features; segs = skb_mac_gso_segment(skb, enc_features); if (IS_ERR_OR_NULL(segs)) { skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len); diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 5882f584910e..36f5584d93c5 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -190,7 +190,7 @@ EXPORT_SYMBOL(icmp_err_convert); */ struct icmp_control { - void (*handler)(struct sk_buff *skb); + bool (*handler)(struct sk_buff *skb); short error; /* This ICMP is classed as an error message */ }; @@ -746,7 +746,7 @@ static bool icmp_tag_validation(int proto) * ICMP_PARAMETERPROB. */ -static void icmp_unreach(struct sk_buff *skb) +static bool icmp_unreach(struct sk_buff *skb) { const struct iphdr *iph; struct icmphdr *icmph; @@ -784,8 +784,8 @@ static void icmp_unreach(struct sk_buff *skb) */ switch (net->ipv4.sysctl_ip_no_pmtu_disc) { default: - LIMIT_NETDEBUG(KERN_INFO pr_fmt("%pI4: fragmentation needed and DF set\n"), - &iph->daddr); + net_dbg_ratelimited("%pI4: fragmentation needed and DF set\n", + &iph->daddr); break; case 2: goto out; @@ -798,8 +798,8 @@ static void icmp_unreach(struct sk_buff *skb) } break; case ICMP_SR_FAILED: - LIMIT_NETDEBUG(KERN_INFO pr_fmt("%pI4: Source Route Failed\n"), - &iph->daddr); + net_dbg_ratelimited("%pI4: Source Route Failed\n", + &iph->daddr); break; default: break; @@ -839,10 +839,10 @@ static void icmp_unreach(struct sk_buff *skb) icmp_socket_deliver(skb, info); out: - return; + return true; out_err: ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS); - goto out; + return false; } @@ -850,17 +850,20 @@ out_err: * Handle ICMP_REDIRECT. 
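The icmp.c conversion around this point changes every entry in icmp_pointers[] from a void handler to one returning bool, so that icmp_rcv() can finally distinguish a packet that was handled (freed with consume_skb(), invisible to drop monitors) from one that was rejected (kfree_skb(), a real drop). The dispatch shape in miniature (types and names invented for the sketch):

    #include <stdbool.h>
    #include <stdio.h>

    struct pkt { const char *name; };

    static void consume_pkt(struct pkt *p) { printf("consumed %s\n", p->name); }
    static void drop_pkt(struct pkt *p)    { printf("dropped %s\n", p->name); }

    static bool handle_echo(struct pkt *p)     { (void)p; return true; }
    static bool handle_discard(struct pkt *p)  { (void)p; return true; }
    static bool handle_redirect(struct pkt *p) { (void)p; return false; }

    struct control { bool (*handler)(struct pkt *); };

    static const struct control table[] = {
            { handle_echo }, { handle_discard }, { handle_redirect },
    };

    int main(void)
    {
            struct pkt p = { "icmp" };

            /* mirrors the tail of the patched icmp_rcv() */
            if (table[2].handler(&p))
                    consume_pkt(&p);
            else
                    drop_pkt(&p);
            return 0;
    }

icmp_discard() returning true ('pretend it was a success') keeps silently ignored types out of the drop statistics as well.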
*/ -static void icmp_redirect(struct sk_buff *skb) +static bool icmp_redirect(struct sk_buff *skb) { if (skb->len < sizeof(struct iphdr)) { ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS); - return; + return false; } - if (!pskb_may_pull(skb, sizeof(struct iphdr))) - return; + if (!pskb_may_pull(skb, sizeof(struct iphdr))) { + /* there aught to be a stat */ + return false; + } icmp_socket_deliver(skb, icmp_hdr(skb)->un.gateway); + return true; } /* @@ -875,7 +878,7 @@ static void icmp_redirect(struct sk_buff *skb) * See also WRT handling of options once they are done and working. */ -static void icmp_echo(struct sk_buff *skb) +static bool icmp_echo(struct sk_buff *skb) { struct net *net; @@ -891,6 +894,8 @@ static void icmp_echo(struct sk_buff *skb) icmp_param.head_len = sizeof(struct icmphdr); icmp_reply(&icmp_param, skb); } + /* should there be an ICMP stat for ignored echos? */ + return true; } /* @@ -900,7 +905,7 @@ static void icmp_echo(struct sk_buff *skb) * MUST be accurate to a few minutes. * MUST be updated at least at 15Hz. */ -static void icmp_timestamp(struct sk_buff *skb) +static bool icmp_timestamp(struct sk_buff *skb) { struct timespec tv; struct icmp_bxm icmp_param; @@ -927,15 +932,17 @@ static void icmp_timestamp(struct sk_buff *skb) icmp_param.data_len = 0; icmp_param.head_len = sizeof(struct icmphdr) + 12; icmp_reply(&icmp_param, skb); -out: - return; + return true; + out_err: ICMP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS); - goto out; + return false; } -static void icmp_discard(struct sk_buff *skb) +static bool icmp_discard(struct sk_buff *skb) { + /* pretend it was a success */ + return true; } /* @@ -946,6 +953,7 @@ int icmp_rcv(struct sk_buff *skb) struct icmphdr *icmph; struct rtable *rt = skb_rtable(skb); struct net *net = dev_net(rt->dst.dev); + bool success; if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { struct sec_path *sp = skb_sec_path(skb); @@ -1012,7 +1020,12 @@ int icmp_rcv(struct sk_buff *skb) } } - icmp_pointers[icmph->type].handler(skb); + success = icmp_pointers[icmph->type].handler(skb); + + if (success) { + consume_skb(skb); + return 0; + } drop: kfree_skb(skb); diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index fb70e3ecc3e4..666cf364df86 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -112,17 +112,17 @@ #ifdef CONFIG_IP_MULTICAST /* Parameter names and values are taken from igmp-v2-06 draft */ -#define IGMP_V1_Router_Present_Timeout (400*HZ) -#define IGMP_V2_Router_Present_Timeout (400*HZ) -#define IGMP_V2_Unsolicited_Report_Interval (10*HZ) -#define IGMP_V3_Unsolicited_Report_Interval (1*HZ) -#define IGMP_Query_Response_Interval (10*HZ) -#define IGMP_Query_Robustness_Variable 2 +#define IGMP_V1_ROUTER_PRESENT_TIMEOUT (400*HZ) +#define IGMP_V2_ROUTER_PRESENT_TIMEOUT (400*HZ) +#define IGMP_V2_UNSOLICITED_REPORT_INTERVAL (10*HZ) +#define IGMP_V3_UNSOLICITED_REPORT_INTERVAL (1*HZ) +#define IGMP_QUERY_RESPONSE_INTERVAL (10*HZ) +#define IGMP_QUERY_ROBUSTNESS_VARIABLE 2 -#define IGMP_Initial_Report_Delay (1) +#define IGMP_INITIAL_REPORT_DELAY (1) -/* IGMP_Initial_Report_Delay is not from IGMP specs! +/* IGMP_INITIAL_REPORT_DELAY is not from IGMP specs! * IGMP specs require to report membership immediately after * joining a group, but we delay the first report by a * small interval. 
It seems more natural and still does not @@ -318,9 +318,7 @@ igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted) return scount; } -#define igmp_skb_size(skb) (*(unsigned int *)((skb)->cb)) - -static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) +static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu) { struct sk_buff *skb; struct rtable *rt; @@ -330,6 +328,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) struct flowi4 fl4; int hlen = LL_RESERVED_SPACE(dev); int tlen = dev->needed_tailroom; + unsigned int size = mtu; while (1) { skb = alloc_skb(size + hlen + tlen, @@ -341,7 +340,6 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) return NULL; } skb->priority = TC_PRIO_CONTROL; - igmp_skb_size(skb) = size; rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0, 0, 0, @@ -354,6 +352,8 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) skb_dst_set(skb, &rt->dst); skb->dev = dev; + skb->reserved_tailroom = skb_end_offset(skb) - + min(mtu, skb_end_offset(skb)); skb_reserve(skb, hlen); skb_reset_network_header(skb); @@ -423,8 +423,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc, return skb; } -#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? igmp_skb_size(skb) - (skb)->len : \ - skb_tailroom(skb)) : 0) +#define AVAILABLE(skb) ((skb) ? skb_availroom(skb) : 0) static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted) @@ -879,15 +878,15 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, if (ih->code == 0) { /* Alas, old v1 router presents here. */ - max_delay = IGMP_Query_Response_Interval; + max_delay = IGMP_QUERY_RESPONSE_INTERVAL; in_dev->mr_v1_seen = jiffies + - IGMP_V1_Router_Present_Timeout; + IGMP_V1_ROUTER_PRESENT_TIMEOUT; group = 0; } else { /* v2 router present */ max_delay = ih->code*(HZ/IGMP_TIMER_SCALE); in_dev->mr_v2_seen = jiffies + - IGMP_V2_Router_Present_Timeout; + IGMP_V2_ROUTER_PRESENT_TIMEOUT; } /* cancel the interface change timer */ in_dev->mr_ifc_count = 0; @@ -899,7 +898,7 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb, return true; /* ignore bogus packet; freed by caller */ } else if (IGMP_V1_SEEN(in_dev)) { /* This is a v3 query with v1 queriers present */ - max_delay = IGMP_Query_Response_Interval; + max_delay = IGMP_QUERY_RESPONSE_INTERVAL; group = 0; } else if (IGMP_V2_SEEN(in_dev)) { /* this is a v3 query with v2 queriers present; @@ -1218,7 +1217,7 @@ static void igmp_group_added(struct ip_mc_list *im) return; if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) { spin_lock_bh(&im->lock); - igmp_start_timer(im, IGMP_Initial_Report_Delay); + igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY); spin_unlock_bh(&im->lock); return; } @@ -1541,7 +1540,7 @@ static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr) int sysctl_igmp_max_memberships __read_mostly = IP_MAX_MEMBERSHIPS; int sysctl_igmp_max_msf __read_mostly = IP_MAX_MSF; #ifdef CONFIG_IP_MULTICAST -int sysctl_igmp_qrv __read_mostly = IGMP_Query_Robustness_Variable; +int sysctl_igmp_qrv __read_mostly = IGMP_QUERY_ROBUSTNESS_VARIABLE; #endif static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode, @@ -2687,11 +2686,7 @@ static int igmp_mcf_seq_show(struct seq_file *seq, void *v) struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq); if (v == SEQ_START_TOKEN) { - seq_printf(seq, - "%3s %6s " - "%10s 
%10s %6s %6s\n", "Idx", - "Device", "MCA", - "SRC", "INC", "EXC"); + seq_puts(seq, "Idx Device MCA SRC INC EXC\n"); } else { seq_printf(seq, "%3d %6.6s 0x%08x " diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index 9eb89f3f0ee4..e7920352646a 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -146,7 +146,6 @@ evict_again: atomic_inc(&fq->refcnt); spin_unlock(&hb->chain_lock); del_timer_sync(&fq->timer); - WARN_ON(atomic_read(&fq->refcnt) != 1); inet_frag_put(fq, f); goto evict_again; } @@ -285,7 +284,8 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f) struct inet_frag_bucket *hb; hb = get_frag_bucket_locked(fq, f); - hlist_del(&fq->list); + if (!(fq->flags & INET_FRAG_EVICTED)) + hlist_del(&fq->list); spin_unlock(&hb->chain_lock); } @@ -458,6 +458,6 @@ void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q, ". Dropping fragment.\n"; if (PTR_ERR(q) == -ENOBUFS) - LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg); + net_dbg_ratelimited("%s%s", prefix, msg); } EXPORT_SYMBOL(inet_frag_maybe_warn_overflow); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 2811cc18701a..e5b6d0ddcb58 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -80,7 +80,7 @@ struct ipq { struct inet_peer *peer; }; -static inline u8 ip4_frag_ecn(u8 tos) +static u8 ip4_frag_ecn(u8 tos) { return 1 << (tos & INET_ECN_MASK); } @@ -148,7 +148,7 @@ static void ip4_frag_init(struct inet_frag_queue *q, const void *a) inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, 1) : NULL; } -static __inline__ void ip4_frag_free(struct inet_frag_queue *q) +static void ip4_frag_free(struct inet_frag_queue *q) { struct ipq *qp; @@ -160,7 +160,7 @@ static __inline__ void ip4_frag_free(struct inet_frag_queue *q) /* Destruction primitives. */ -static __inline__ void ipq_put(struct ipq *ipq) +static void ipq_put(struct ipq *ipq) { inet_frag_put(&ipq->q, &ip4_frags); } @@ -236,7 +236,7 @@ out: /* Find the correct entry in the "incomplete datagrams" queue for * this IP datagram, and create new one, if nothing is found. */ -static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user) +static struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user) { struct inet_frag_queue *q; struct ip4_create_arg arg; @@ -256,7 +256,7 @@ static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user) } /* Is the fragment too far ahead to be part of ipq? 
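Stepping back to the igmpv3_newpack() change above: rather than stashing the allocation size in skb->cb, the packet now sets reserved_tailroom so that skb_availroom() caps usable space at the device MTU even when the allocator rounds the buffer up. A rough userspace analogue of the arithmetic, treating "len" as bytes already consumed, which simplifies the real tailroom accounting:

#include <stdio.h>

static unsigned int availroom(unsigned int end_offset, unsigned int len,
			      unsigned int mtu)
{
	unsigned int reserved =
		end_offset - (mtu < end_offset ? mtu : end_offset);

	return end_offset - len - reserved;	/* skb_availroom() analogue */
}

int main(void)
{
	/* 2048-byte buffer, 1500-byte MTU: room is capped at the MTU */
	printf("%u\n", availroom(2048, 100, 1500));	/* 1400 */
	printf("%u\n", availroom(1024, 100, 1500));	/* 924  */
	return 0;
}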
*/ -static inline int ip_frag_too_far(struct ipq *qp) +static int ip_frag_too_far(struct ipq *qp) { struct inet_peer *peer = qp->peer; unsigned int max = sysctl_ipfrag_max_dist; @@ -618,8 +618,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, return 0; out_nomem: - LIMIT_NETDEBUG(KERN_ERR pr_fmt("queue_glue: no memory for gluing queue %p\n"), - qp); + net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp); err = -ENOMEM; goto out_fail; out_oversize: @@ -795,16 +794,16 @@ static void __init ip4_frags_ctl_register(void) register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table); } #else -static inline int ip4_frags_ns_ctl_register(struct net *net) +static int ip4_frags_ns_ctl_register(struct net *net) { return 0; } -static inline void ip4_frags_ns_ctl_unregister(struct net *net) +static void ip4_frags_ns_ctl_unregister(struct net *net) { } -static inline void __init ip4_frags_ctl_register(void) +static void __init ip4_frags_ctl_register(void) { } #endif diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 12055fdbe716..ac8491245e5b 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -789,7 +789,7 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_u16(skb, IFLA_GRE_ENCAP_DPORT, t->encap.dport) || nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS, - t->encap.dport)) + t->encap.flags)) goto nla_put_failure; return 0; diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 88e5ef2c7f51..4a929adf2ab7 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -231,7 +231,7 @@ static int ip_finish_output_gso(struct sk_buff *skb) */ features = netif_skb_features(skb); segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); - if (IS_ERR(segs)) { + if (IS_ERR_OR_NULL(segs)) { kfree_skb(skb); return -ENOMEM; } @@ -662,12 +662,10 @@ slow_path: if (len < left) { len &= ~7; } - /* - * Allocate buffer. 
- */ - if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) { - NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n"); + /* Allocate buffer */ + skb2 = alloc_skb(len + hlen + ll_rs, GFP_ATOMIC); + if (!skb2) { err = -ENOMEM; goto fail; } diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index c373a9ad4555..b7826575d215 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -195,7 +195,7 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc, for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { if (!CMSG_OK(msg, cmsg)) return -EINVAL; -#if defined(CONFIG_IPV6) +#if IS_ENABLED(CONFIG_IPV6) if (allow_ipv6 && cmsg->cmsg_level == SOL_IPV6 && cmsg->cmsg_type == IPV6_PKTINFO) { @@ -424,7 +424,7 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) msg->msg_flags |= MSG_TRUNC; copied = len; } - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto out_free_skb; diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 0bb8e141eacc..63e745aadab6 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -56,7 +56,6 @@ #include <net/netns/generic.h> #include <net/rtnetlink.h> #include <net/udp.h> -#include <net/gue.h> #if IS_ENABLED(CONFIG_IPV6) #include <net/ipv6.h> @@ -491,18 +490,51 @@ EXPORT_SYMBOL_GPL(ip_tunnel_rcv); static int ip_encap_hlen(struct ip_tunnel_encap *e) { - switch (e->type) { - case TUNNEL_ENCAP_NONE: + const struct ip_tunnel_encap_ops *ops; + int hlen = -EINVAL; + + if (e->type == TUNNEL_ENCAP_NONE) return 0; - case TUNNEL_ENCAP_FOU: - return sizeof(struct udphdr); - case TUNNEL_ENCAP_GUE: - return sizeof(struct udphdr) + sizeof(struct guehdr); - default: + + if (e->type >= MAX_IPTUN_ENCAP_OPS) return -EINVAL; - } + + rcu_read_lock(); + ops = rcu_dereference(iptun_encaps[e->type]); + if (likely(ops && ops->encap_hlen)) + hlen = ops->encap_hlen(e); + rcu_read_unlock(); + + return hlen; } +const struct ip_tunnel_encap_ops __rcu * + iptun_encaps[MAX_IPTUN_ENCAP_OPS] __read_mostly; + +int ip_tunnel_encap_add_ops(const struct ip_tunnel_encap_ops *ops, + unsigned int num) +{ + return !cmpxchg((const struct ip_tunnel_encap_ops **) + &iptun_encaps[num], + NULL, ops) ? 0 : -1; +} +EXPORT_SYMBOL(ip_tunnel_encap_add_ops); + +int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *ops, + unsigned int num) +{ + int ret; + + ret = (cmpxchg((const struct ip_tunnel_encap_ops **) + &iptun_encaps[num], + ops, NULL) == ops) ? 0 : -1; + + synchronize_net(); + + return ret; +} +EXPORT_SYMBOL(ip_tunnel_encap_del_ops); + int ip_tunnel_encap_setup(struct ip_tunnel *t, struct ip_tunnel_encap *ipencap) { @@ -526,63 +558,22 @@ int ip_tunnel_encap_setup(struct ip_tunnel *t, } EXPORT_SYMBOL_GPL(ip_tunnel_encap_setup); -static int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, - size_t hdr_len, u8 *protocol, struct flowi4 *fl4) -{ - struct udphdr *uh; - __be16 sport; - bool csum = !!(e->flags & TUNNEL_ENCAP_FLAG_CSUM); - int type = csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; - - skb = iptunnel_handle_offloads(skb, csum, type); - - if (IS_ERR(skb)) - return PTR_ERR(skb); - - /* Get length and hash before making space in skb */ - - sport = e->sport ? 
: udp_flow_src_port(dev_net(skb->dev), - skb, 0, 0, false); - - skb_push(skb, hdr_len); - - skb_reset_transport_header(skb); - uh = udp_hdr(skb); - - if (e->type == TUNNEL_ENCAP_GUE) { - struct guehdr *guehdr = (struct guehdr *)&uh[1]; - - guehdr->version = 0; - guehdr->hlen = 0; - guehdr->flags = 0; - guehdr->next_hdr = *protocol; - } - - uh->dest = e->dport; - uh->source = sport; - uh->len = htons(skb->len); - uh->check = 0; - udp_set_csum(!(e->flags & TUNNEL_ENCAP_FLAG_CSUM), skb, - fl4->saddr, fl4->daddr, skb->len); - - *protocol = IPPROTO_UDP; - - return 0; -} - int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t, u8 *protocol, struct flowi4 *fl4) { - switch (t->encap.type) { - case TUNNEL_ENCAP_NONE: + const struct ip_tunnel_encap_ops *ops; + int ret = -EINVAL; + + if (t->encap.type == TUNNEL_ENCAP_NONE) return 0; - case TUNNEL_ENCAP_FOU: - case TUNNEL_ENCAP_GUE: - return fou_build_header(skb, &t->encap, t->encap_hlen, - protocol, fl4); - default: - return -EINVAL; - } + + rcu_read_lock(); + ops = rcu_dereference(iptun_encaps[t->encap.type]); + if (likely(ops && ops->build_header)) + ret = ops->build_header(skb, &t->encap, protocol, fl4); + rcu_read_unlock(); + + return ret; } EXPORT_SYMBOL(ip_tunnel_encap); diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 648fa1490ea7..7fa18bc7e47f 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -115,7 +115,7 @@ */ int ic_set_manually __initdata = 0; /* IPconfig parameters set manually */ -static int ic_enable __initdata = 0; /* IP config enabled? */ +static int ic_enable __initdata; /* IP config enabled? */ /* Protocol choice */ int ic_proto_enabled __initdata = 0 @@ -130,7 +130,7 @@ int ic_proto_enabled __initdata = 0 #endif ; -static int ic_host_name_set __initdata = 0; /* Host name set by us? */ +static int ic_host_name_set __initdata; /* Host name set by us? 
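The ip_tunnel_encap_add_ops()/ip_tunnel_encap_del_ops() pair above claims an encap-type slot with a NULL-to-ops cmpxchg() and releases it with ops-to-NULL, so registration needs no lock and a second module can never steal an occupied slot (the kernel additionally runs synchronize_net() before the memory may be reused). A self-contained C11 sketch of that claim/release discipline:

#include <stdio.h>
#include <stdatomic.h>

#define MAX_OPS 8

struct encap_ops { const char *name; };

static _Atomic(const struct encap_ops *) slots[MAX_OPS];

static int add_ops(const struct encap_ops *ops, unsigned int num)
{
	const struct encap_ops *expected = NULL;

	/* claim the slot only if it is still free */
	return atomic_compare_exchange_strong(&slots[num], &expected, ops)
		? 0 : -1;
}

static int del_ops(const struct encap_ops *ops, unsigned int num)
{
	const struct encap_ops *expected = ops;

	/* release the slot only if we are the registered owner */
	return atomic_compare_exchange_strong(&slots[num], &expected, NULL)
		? 0 : -1;
}

int main(void)
{
	static const struct encap_ops fou = { "fou" }, gue = { "gue" };

	printf("add fou: %d\n", add_ops(&fou, 1));	/* 0: slot was free  */
	printf("add gue: %d\n", add_ops(&gue, 1));	/* -1: already taken */
	printf("del fou: %d\n", del_ops(&fou, 1));	/* 0: we owned it    */
	return 0;
}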
*/ __be32 ic_myaddr = NONE; /* My IP address */ static __be32 ic_netmask = NONE; /* Netmask for local subnet */ @@ -160,17 +160,17 @@ static u8 ic_domain[64]; /* DNS (not NIS) domain name */ static char user_dev_name[IFNAMSIZ] __initdata = { 0, }; /* Protocols supported by available interfaces */ -static int ic_proto_have_if __initdata = 0; +static int ic_proto_have_if __initdata; /* MTU for boot device */ -static int ic_dev_mtu __initdata = 0; +static int ic_dev_mtu __initdata; #ifdef IPCONFIG_DYNAMIC static DEFINE_SPINLOCK(ic_recv_lock); -static volatile int ic_got_reply __initdata = 0; /* Proto(s) that replied */ +static volatile int ic_got_reply __initdata; /* Proto(s) that replied */ #endif #ifdef IPCONFIG_DHCP -static int ic_dhcp_msgtype __initdata = 0; /* DHCP msg type received */ +static int ic_dhcp_msgtype __initdata; /* DHCP msg type received */ #endif @@ -186,8 +186,8 @@ struct ic_device { __be32 xid; }; -static struct ic_device *ic_first_dev __initdata = NULL;/* List of open device */ -static struct net_device *ic_dev __initdata = NULL; /* Selected device */ +static struct ic_device *ic_first_dev __initdata; /* List of open device */ +static struct net_device *ic_dev __initdata; /* Selected device */ static bool __init ic_is_init_dev(struct net_device *dev) { @@ -498,7 +498,7 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt struct arphdr *rarp; unsigned char *rarp_ptr; __be32 sip, tip; - unsigned char *sha, *tha; /* s for "source", t for "target" */ + unsigned char *tha; /* t for "target" */ struct ic_device *d; if (!net_eq(dev_net(dev), &init_net)) @@ -549,7 +549,6 @@ ic_rarp_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt goto drop_unlock; /* should never happen */ /* Extract variable-width fields */ - sha = rarp_ptr; rarp_ptr += dev->addr_len; memcpy(&sip, rarp_ptr, 4); rarp_ptr += 4; diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 37096d64730e..40403114f00a 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -465,7 +465,7 @@ static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_u16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) || nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, - tunnel->encap.dport)) + tunnel->encap.flags)) goto nla_put_failure; return 0; diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c index b023b4eb1a96..1baaa83dfe5c 100644 --- a/net/ipv4/netfilter/nf_reject_ipv4.c +++ b/net/ipv4/netfilter/nf_reject_ipv4.c @@ -6,48 +6,45 @@ * published by the Free Software Foundation. */ +#include <linux/module.h> #include <net/ip.h> #include <net/tcp.h> #include <net/route.h> #include <net/dst.h> #include <linux/netfilter_ipv4.h> +#include <net/netfilter/ipv4/nf_reject.h> -/* Send RST reply */ -void nf_send_reset(struct sk_buff *oldskb, int hook) +const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb, + struct tcphdr *_oth, int hook) { - struct sk_buff *nskb; - const struct iphdr *oiph; - struct iphdr *niph; const struct tcphdr *oth; - struct tcphdr _otcph, *tcph; /* IP header checks: fragment. */ if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET)) - return; + return NULL; oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb), - sizeof(_otcph), &_otcph); + sizeof(struct tcphdr), _oth); if (oth == NULL) - return; + return NULL; /* No RST for RST. 
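The nf_reject refactor that continues below keeps nf_send_reset()'s sequence-number rules intact inside nf_reject_ip_tcphdr_put(): a RST answering a segment that carried no ACK must itself acknowledge the SYN/FIN flags plus every payload byte of the offending segment. A quick userspace check of that arithmetic, with illustrative lengths:

#include <stdio.h>
#include <stdint.h>

static uint32_t rst_ack_seq(uint32_t seq, int syn, int fin,
			    unsigned int skb_len, unsigned int ip_hdrlen,
			    unsigned int doff)
{
	/* seq + flags + payload; doff is the TCP header length in words */
	return seq + syn + fin + skb_len - ip_hdrlen - (doff << 2);
}

int main(void)
{
	/* 52-byte SYN (20B IP + 32B TCP incl. options), no payload */
	printf("%u\n", rst_ack_seq(1000, 1, 0, 52, 20, 8));	/* 1001 */
	return 0;
}

Splitting the header construction into exported helpers lets other reject paths (e.g. the bridge) reuse exactly the same numbering without duplicating it.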
*/ if (oth->rst) - return; - - if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) - return; + return NULL; /* Check checksum */ if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP)) - return; - oiph = ip_hdr(oldskb); + return NULL; - nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) + - LL_MAX_HEADER, GFP_ATOMIC); - if (!nskb) - return; + return oth; +} +EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_get); - skb_reserve(nskb, LL_MAX_HEADER); +struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb, + const struct sk_buff *oldskb, + __be16 protocol, int ttl) +{ + struct iphdr *niph, *oiph = ip_hdr(oldskb); skb_reset_network_header(nskb); niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr)); @@ -56,10 +53,23 @@ void nf_send_reset(struct sk_buff *oldskb, int hook) niph->tos = 0; niph->id = 0; niph->frag_off = htons(IP_DF); - niph->protocol = IPPROTO_TCP; + niph->protocol = protocol; niph->check = 0; niph->saddr = oiph->daddr; niph->daddr = oiph->saddr; + niph->ttl = ttl; + + nskb->protocol = htons(ETH_P_IP); + + return niph; +} +EXPORT_SYMBOL_GPL(nf_reject_iphdr_put); + +void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb, + const struct tcphdr *oth) +{ + struct iphdr *niph = ip_hdr(nskb); + struct tcphdr *tcph; skb_reset_transport_header(nskb); tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); @@ -68,9 +78,9 @@ void nf_send_reset(struct sk_buff *oldskb, int hook) tcph->dest = oth->source; tcph->doff = sizeof(struct tcphdr) / 4; - if (oth->ack) + if (oth->ack) { tcph->seq = oth->ack_seq; - else { + } else { tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin + oldskb->len - ip_hdrlen(oldskb) - (oth->doff << 2)); @@ -83,16 +93,43 @@ void nf_send_reset(struct sk_buff *oldskb, int hook) nskb->ip_summed = CHECKSUM_PARTIAL; nskb->csum_start = (unsigned char *)tcph - nskb->head; nskb->csum_offset = offsetof(struct tcphdr, check); +} +EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_put); + +/* Send RST reply */ +void nf_send_reset(struct sk_buff *oldskb, int hook) +{ + struct sk_buff *nskb; + const struct iphdr *oiph; + struct iphdr *niph; + const struct tcphdr *oth; + struct tcphdr _oth; + + oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook); + if (!oth) + return; + + if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) + return; + + oiph = ip_hdr(oldskb); + + nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) + + LL_MAX_HEADER, GFP_ATOMIC); + if (!nskb) + return; /* ip_route_me_harder expects skb->dst to be set */ skb_dst_set_noref(nskb, skb_dst(oldskb)); - nskb->protocol = htons(ETH_P_IP); + skb_reserve(nskb, LL_MAX_HEADER); + niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP, + ip4_dst_hoplimit(skb_dst(nskb))); + nf_reject_ip_tcphdr_put(nskb, oldskb, oth); + if (ip_route_me_harder(nskb, RTN_UNSPEC)) goto free_nskb; - niph->ttl = ip4_dst_hoplimit(skb_dst(nskb)); - /* "Never happens" */ if (nskb->len > dst_mtu(skb_dst(nskb))) goto free_nskb; @@ -125,3 +162,5 @@ void nf_send_reset(struct sk_buff *oldskb, int hook) kfree_skb(nskb); } EXPORT_SYMBOL_GPL(nf_send_reset); + +MODULE_LICENSE("GPL"); diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c index 1c636d6b5b50..c1023c445920 100644 --- a/net/ipv4/netfilter/nft_masq_ipv4.c +++ b/net/ipv4/netfilter/nft_masq_ipv4.c @@ -39,6 +39,7 @@ static const struct nft_expr_ops nft_masq_ipv4_ops = { .eval = nft_masq_ipv4_eval, .init = nft_masq_init, .dump = nft_masq_dump, + .validate = nft_masq_validate, }; static struct 
nft_expr_type nft_masq_ipv4_type __read_mostly = { diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 57f7c9804139..ce2920f5bef3 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -875,7 +875,7 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, } /* Don't bother checking the checksum */ - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto done; @@ -955,7 +955,7 @@ EXPORT_SYMBOL_GPL(ping_queue_rcv_skb); * All we need to do is get the socket. */ -void ping_rcv(struct sk_buff *skb) +bool ping_rcv(struct sk_buff *skb) { struct sock *sk; struct net *net = dev_net(skb->dev); @@ -974,11 +974,11 @@ void ping_rcv(struct sk_buff *skb) pr_debug("rcv on socket %p\n", sk); ping_queue_rcv_skb(sk, skb_get(skb)); sock_put(sk); - return; + return true; } pr_debug("no socket, dropping\n"); - /* We're called from icmp_rcv(). kfree_skb() is done there. */ + return false; } EXPORT_SYMBOL_GPL(ping_rcv); diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 8e3eb39f84e7..6513ade8d6dc 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -181,6 +181,7 @@ static const struct snmp_mib snmp4_udp_list[] = { SNMP_MIB_ITEM("RcvbufErrors", UDP_MIB_RCVBUFERRORS), SNMP_MIB_ITEM("SndbufErrors", UDP_MIB_SNDBUFERRORS), SNMP_MIB_ITEM("InCsumErrors", UDP_MIB_CSUMERRORS), + SNMP_MIB_ITEM("IgnoredMulti", UDP_MIB_IGNOREDMULTI), SNMP_MIB_SENTINEL }; @@ -296,12 +297,12 @@ static void icmpmsg_put_line(struct seq_file *seq, unsigned long *vals, int j; if (count) { - seq_printf(seq, "\nIcmpMsg:"); + seq_puts(seq, "\nIcmpMsg:"); for (j = 0; j < count; ++j) seq_printf(seq, " %sType%u", type[j] & 0x100 ? "Out" : "In", type[j] & 0xff); - seq_printf(seq, "\nIcmpMsg:"); + seq_puts(seq, "\nIcmpMsg:"); for (j = 0; j < count; ++j) seq_printf(seq, " %lu", vals[j]); } @@ -342,7 +343,7 @@ static void icmp_put(struct seq_file *seq) seq_puts(seq, "\nIcmp: InMsgs InErrors InCsumErrors"); for (i = 0; icmpmibmap[i].name != NULL; i++) seq_printf(seq, " In%s", icmpmibmap[i].name); - seq_printf(seq, " OutMsgs OutErrors"); + seq_puts(seq, " OutMsgs OutErrors"); for (i = 0; icmpmibmap[i].name != NULL; i++) seq_printf(seq, " Out%s", icmpmibmap[i].name); seq_printf(seq, "\nIcmp: %lu %lu %lu", diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 739db3100c23..43385a9fa441 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -79,6 +79,16 @@ #include <linux/netfilter.h> #include <linux/netfilter_ipv4.h> #include <linux/compat.h> +#include <linux/uio.h> + +struct raw_frag_vec { + struct iovec *iov; + union { + struct icmphdr icmph; + char c[1]; + } hdr; + int hlen; +}; static struct raw_hashinfo raw_v4_hashinfo = { .lock = __RW_LOCK_UNLOCKED(raw_v4_hashinfo.lock), @@ -420,53 +430,57 @@ error: return err; } -static int raw_probe_proto_opt(struct flowi4 *fl4, struct msghdr *msg) +static int raw_probe_proto_opt(struct raw_frag_vec *rfv, struct flowi4 *fl4) { - struct iovec *iov; - u8 __user *type = NULL; - u8 __user *code = NULL; - int probed = 0; - unsigned int i; + int err; - if (!msg->msg_iov) + if (fl4->flowi4_proto != IPPROTO_ICMP) return 0; - for (i = 0; i < msg->msg_iovlen; i++) { - iov = &msg->msg_iov[i]; - if (!iov) - continue; - - switch (fl4->flowi4_proto) { - case IPPROTO_ICMP: - /* check if one-byte field is readable or not. */ - if (iov->iov_base && iov->iov_len < 1) - break; - - if (!type) { - type = iov->iov_base; - /* check if code field is readable or not. 
*/ - if (iov->iov_len > 1) - code = type + 1; - } else if (!code) - code = iov->iov_base; - - if (type && code) { - if (get_user(fl4->fl4_icmp_type, type) || - get_user(fl4->fl4_icmp_code, code)) - return -EFAULT; - probed = 1; - } - break; - default: - probed = 1; - break; - } - if (probed) - break; - } + /* We only need the first two bytes. */ + rfv->hlen = 2; + + err = memcpy_fromiovec(rfv->hdr.c, rfv->iov, rfv->hlen); + if (err) + return err; + + fl4->fl4_icmp_type = rfv->hdr.icmph.type; + fl4->fl4_icmp_code = rfv->hdr.icmph.code; + return 0; } +static int raw_getfrag(void *from, char *to, int offset, int len, int odd, + struct sk_buff *skb) +{ + struct raw_frag_vec *rfv = from; + + if (offset < rfv->hlen) { + int copy = min(rfv->hlen - offset, len); + + if (skb->ip_summed == CHECKSUM_PARTIAL) + memcpy(to, rfv->hdr.c + offset, copy); + else + skb->csum = csum_block_add( + skb->csum, + csum_partial_copy_nocheck(rfv->hdr.c + offset, + to, copy, 0), + odd); + + odd = 0; + offset += copy; + to += copy; + len -= copy; + + if (!len) + return 0; + } + + offset -= rfv->hlen; + + return ip_generic_getfrag(rfv->iov, to, offset, len, odd, skb); +} + static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len) { @@ -480,6 +494,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, u8 tos; int err; struct ip_options_data opt_copy; + struct raw_frag_vec rfv; err = -EMSGSIZE; if (len > 0xFFFF) @@ -585,7 +600,10 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, daddr, saddr, 0, 0); if (!inet->hdrincl) { - err = raw_probe_proto_opt(&fl4, msg); + rfv.iov = msg->msg_iov; + rfv.hlen = 0; + + err = raw_probe_proto_opt(&rfv, &fl4); if (err) goto done; } @@ -616,8 +634,8 @@ back_from_confirm: if (!ipc.addr) ipc.addr = fl4.daddr; lock_sock(sk); - err = ip_append_data(sk, &fl4, ip_generic_getfrag, - msg->msg_iov, len, 0, + err = ip_append_data(sk, &fl4, raw_getfrag, + &rfv, len, 0, &ipc, &rt, msg->msg_flags); if (err) ip_flush_pending_frames(sk); @@ -718,7 +736,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, copied = len; } - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto done; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 2d4ae469b471..6a2155b02602 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1798,6 +1798,7 @@ local_input: no_route: RT_CACHE_STAT_INC(in_no_route); res.type = RTN_UNREACHABLE; + res.fi = NULL; goto local_input; /* diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 32b98d0207b4..45fe60c5238e 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -19,10 +19,6 @@ #include <net/tcp.h> #include <net/route.h> -/* Timestamps: lowest bits store TCP options */ -#define TSBITS 6 -#define TSMASK (((__u32)1 << TSBITS) - 1) - extern int sysctl_tcp_syncookies; static u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS] __read_mostly; @@ -30,6 +26,30 @@ static u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS] __read_mostly; #define COOKIEBITS 24 /* Upper bits store count */ #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) +/* TCP Timestamp: 6 lowest bits of timestamp sent in the cookie SYN-ACK + * stores TCP options: + * + * MSB LSB + * | 31 ... 
6 | 5 | 4 | 3 2 1 0 | + * | Timestamp | ECN | SACK | WScale | + * + * When we receive a valid cookie-ACK, we look at the echoed tsval (if + * any) to figure out which TCP options we should use for the rebuilt + * connection. + * + * A WScale setting of '0xf' (which is an invalid scaling value) + * means that original syn did not include the TCP window scaling option. + */ +#define TS_OPT_WSCALE_MASK 0xf +#define TS_OPT_SACK BIT(4) +#define TS_OPT_ECN BIT(5) +/* There is no TS_OPT_TIMESTAMP: + * if ACK contains timestamp option, we already know it was + * requested/supported by the syn/synack exchange. + */ +#define TSBITS 6 +#define TSMASK (((__u32)1 << TSBITS) - 1) + static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS], ipv4_cookie_scratch); @@ -67,9 +87,11 @@ __u32 cookie_init_timestamp(struct request_sock *req) ireq = inet_rsk(req); - options = ireq->wscale_ok ? ireq->snd_wscale : 0xf; - options |= ireq->sack_ok << 4; - options |= ireq->ecn_ok << 5; + options = ireq->wscale_ok ? ireq->snd_wscale : TS_OPT_WSCALE_MASK; + if (ireq->sack_ok) + options |= TS_OPT_SACK; + if (ireq->ecn_ok) + options |= TS_OPT_ECN; ts = ts_now & ~TSMASK; ts |= options; @@ -219,16 +241,13 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, * additional tcp options in the timestamp. * This extracts these options from the timestamp echo. * - * The lowest 4 bits store snd_wscale. - * next 2 bits indicate SACK and ECN support. - * - * return false if we decode an option that should not be. + * return false if we decode a tcp option that is disabled + * on the host. */ -bool cookie_check_timestamp(struct tcp_options_received *tcp_opt, - struct net *net, bool *ecn_ok) +bool cookie_timestamp_decode(struct tcp_options_received *tcp_opt) { /* echoed timestamp, lowest bits contain options */ - u32 options = tcp_opt->rcv_tsecr & TSMASK; + u32 options = tcp_opt->rcv_tsecr; if (!tcp_opt->saw_tstamp) { tcp_clear_options(tcp_opt); @@ -238,22 +257,35 @@ bool cookie_check_timestamp(struct tcp_options_received *tcp_opt, if (!sysctl_tcp_timestamps) return false; - tcp_opt->sack_ok = (options & (1 << 4)) ? TCP_SACK_SEEN : 0; - *ecn_ok = (options >> 5) & 1; - if (*ecn_ok && !net->ipv4.sysctl_tcp_ecn) - return false; + tcp_opt->sack_ok = (options & TS_OPT_SACK) ? 
TCP_SACK_SEEN : 0; if (tcp_opt->sack_ok && !sysctl_tcp_sack) return false; - if ((options & 0xf) == 0xf) + if ((options & TS_OPT_WSCALE_MASK) == TS_OPT_WSCALE_MASK) return true; /* no window scaling */ tcp_opt->wscale_ok = 1; - tcp_opt->snd_wscale = options & 0xf; + tcp_opt->snd_wscale = options & TS_OPT_WSCALE_MASK; + return sysctl_tcp_window_scaling != 0; } -EXPORT_SYMBOL(cookie_check_timestamp); +EXPORT_SYMBOL(cookie_timestamp_decode); + +bool cookie_ecn_ok(const struct tcp_options_received *tcp_opt, + const struct net *net, const struct dst_entry *dst) +{ + bool ecn_ok = tcp_opt->rcv_tsecr & TS_OPT_ECN; + + if (!ecn_ok) + return false; + + if (net->ipv4.sysctl_tcp_ecn) + return true; + + return dst_feature(dst, RTAX_FEATURE_ECN); +} +EXPORT_SYMBOL(cookie_ecn_ok); struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) { @@ -269,14 +301,16 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) int mss; struct rtable *rt; __u8 rcv_wscale; - bool ecn_ok = false; struct flowi4 fl4; if (!sysctl_tcp_syncookies || !th->ack || th->rst) goto out; - if (tcp_synq_no_recent_overflow(sk) || - (mss = __cookie_v4_check(ip_hdr(skb), th, cookie)) == 0) { + if (tcp_synq_no_recent_overflow(sk)) + goto out; + + mss = __cookie_v4_check(ip_hdr(skb), th, cookie); + if (mss == 0) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED); goto out; } @@ -287,7 +321,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) memset(&tcp_opt, 0, sizeof(tcp_opt)); tcp_parse_options(skb, &tcp_opt, 0, NULL); - if (!cookie_check_timestamp(&tcp_opt, sock_net(sk), &ecn_ok)) + if (!cookie_timestamp_decode(&tcp_opt)) goto out; ret = NULL; @@ -305,7 +339,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) ireq->ir_loc_addr = ip_hdr(skb)->daddr; ireq->ir_rmt_addr = ip_hdr(skb)->saddr; ireq->ir_mark = inet_request_mark(sk, skb); - ireq->ecn_ok = ecn_ok; ireq->snd_wscale = tcp_opt.snd_wscale; ireq->sack_ok = tcp_opt.sack_ok; ireq->wscale_ok = tcp_opt.wscale_ok; @@ -354,6 +387,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) dst_metric(&rt->dst, RTAX_INITRWND)); ireq->rcv_wscale = rcv_wscale; + ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), &rt->dst); ret = get_cookie_sock(sk, skb, req, &rt->dst); /* ip_queue_xmit() depends on our flow being setup diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index b3c53c8b331e..e0ee384a448f 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -496,6 +496,13 @@ static struct ctl_table ipv4_table[] = { .proc_handler = proc_dointvec }, { + .procname = "tcp_max_reordering", + .data = &sysctl_tcp_max_reordering, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec + }, + { .procname = "tcp_dsack", .data = &sysctl_tcp_dsack, .maxlen = sizeof(int), diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 1bec4e76d88c..c239f4740d10 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1377,7 +1377,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len) /* XXX -- need to support SO_PEEK_OFF */ skb_queue_walk(&sk->sk_write_queue, skb) { - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, skb->len); + err = skb_copy_datagram_msg(skb, 0, msg, skb->len); if (err) break; @@ -1833,8 +1833,7 @@ do_prequeue: } if (!(flags & MSG_TRUNC)) { - err = skb_copy_datagram_iovec(skb, offset, - msg->msg_iov, used); + err = skb_copy_datagram_msg(skb, offset, msg, used); if (err) { /* Exception. Bailout! 
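The TS_OPT_* layout introduced above is easy to exercise in isolation: cookie_init_timestamp() overwrites the low six bits of the outgoing tsval with the negotiated options, and cookie_timestamp_decode() reads them back from the echoed tsecr. A userspace sketch using the same bit positions:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define TS_OPT_WSCALE_MASK 0xfu
#define TS_OPT_SACK        (1u << 4)
#define TS_OPT_ECN         (1u << 5)
#define TSBITS             6
#define TSMASK             ((1u << TSBITS) - 1)

static uint32_t encode(uint32_t ts_now, bool wscale_ok, uint8_t snd_wscale,
		       bool sack_ok, bool ecn_ok)
{
	/* 0xf in the wscale field means "no window scaling requested" */
	uint32_t options = wscale_ok ? snd_wscale : TS_OPT_WSCALE_MASK;

	if (sack_ok)
		options |= TS_OPT_SACK;
	if (ecn_ok)
		options |= TS_OPT_ECN;
	return (ts_now & ~TSMASK) | options;
}

int main(void)
{
	uint32_t ts = encode(0x12345678, true, 7, true, false);

	printf("wscale=%u sack=%d ecn=%d\n",
	       ts & TS_OPT_WSCALE_MASK,
	       !!(ts & TS_OPT_SACK), !!(ts & TS_OPT_ECN));
	return 0;	/* prints: wscale=7 sack=1 ecn=0 */
}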
*/ if (!copied) @@ -2868,61 +2867,42 @@ EXPORT_SYMBOL(compat_tcp_getsockopt); #endif #ifdef CONFIG_TCP_MD5SIG -static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly; +static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool); static DEFINE_MUTEX(tcp_md5sig_mutex); - -static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool) -{ - int cpu; - - for_each_possible_cpu(cpu) { - struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu); - - if (p->md5_desc.tfm) - crypto_free_hash(p->md5_desc.tfm); - } - free_percpu(pool); -} +static bool tcp_md5sig_pool_populated = false; static void __tcp_alloc_md5sig_pool(void) { int cpu; - struct tcp_md5sig_pool __percpu *pool; - - pool = alloc_percpu(struct tcp_md5sig_pool); - if (!pool) - return; for_each_possible_cpu(cpu) { - struct crypto_hash *hash; - - hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); - if (IS_ERR_OR_NULL(hash)) - goto out_free; + if (!per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm) { + struct crypto_hash *hash; - per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash; + hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR_OR_NULL(hash)) + return; + per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash; + } } - /* before setting tcp_md5sig_pool, we must commit all writes - * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool() + /* before setting tcp_md5sig_pool_populated, we must commit all writes + * to memory. See smp_rmb() in tcp_get_md5sig_pool() */ smp_wmb(); - tcp_md5sig_pool = pool; - return; -out_free: - __tcp_free_md5sig_pool(pool); + tcp_md5sig_pool_populated = true; } bool tcp_alloc_md5sig_pool(void) { - if (unlikely(!tcp_md5sig_pool)) { + if (unlikely(!tcp_md5sig_pool_populated)) { mutex_lock(&tcp_md5sig_mutex); - if (!tcp_md5sig_pool) + if (!tcp_md5sig_pool_populated) __tcp_alloc_md5sig_pool(); mutex_unlock(&tcp_md5sig_mutex); } - return tcp_md5sig_pool != NULL; + return tcp_md5sig_pool_populated; } EXPORT_SYMBOL(tcp_alloc_md5sig_pool); @@ -2936,13 +2916,13 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool); */ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) { - struct tcp_md5sig_pool __percpu *p; - local_bh_disable(); - p = ACCESS_ONCE(tcp_md5sig_pool); - if (p) - return raw_cpu_ptr(p); + if (tcp_md5sig_pool_populated) { + /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */ + smp_rmb(); + return this_cpu_ptr(&tcp_md5sig_pool); + } local_bh_enable(); return NULL; } diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index b1c5970d47a1..27ead0dd16bc 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -1,5 +1,5 @@ /* - * Plugable TCP congestion control support and newReno + * Pluggable TCP congestion control support and newReno * congestion control. * Based on ideas from I/O scheduler support and Web100. 
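The tcp_md5sig_pool rework above is a classic publish/subscribe ordering problem: the writer fills the per-cpu crypto state, then sets tcp_md5sig_pool_populated behind an smp_wmb(), and readers in tcp_get_md5sig_pool() must observe the flag before touching the state (hence the paired smp_rmb()). A userspace analogue with C11 release/acquire in place of the explicit kernel barriers:

#include <stdio.h>
#include <stdatomic.h>

static int pool_state;		/* stands in for the per-cpu md5 tfms */
static atomic_bool populated;

static void publish(void)
{
	pool_state = 42;				/* initialize first  */
	atomic_store_explicit(&populated, true,
			      memory_order_release);	/* then set the flag */
}

static int get_pool(int *out)
{
	if (!atomic_load_explicit(&populated, memory_order_acquire))
		return -1;		/* not ready yet */
	*out = pool_state;		/* safe: ordered after the flag read */
	return 0;
}

int main(void)
{
	int v;

	publish();
	if (!get_pool(&v))
		printf("pool value %d\n", v);
	return 0;
}

Because the tfms are never freed once populated, the flag only ever goes false-to-true, which is what makes the lock-free read side safe.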
* diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index a12b455928e5..d91436ba17ea 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -81,6 +81,7 @@ int sysctl_tcp_window_scaling __read_mostly = 1; int sysctl_tcp_sack __read_mostly = 1; int sysctl_tcp_fack __read_mostly = 1; int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH; +int sysctl_tcp_max_reordering __read_mostly = 300; EXPORT_SYMBOL(sysctl_tcp_reordering); int sysctl_tcp_dsack __read_mostly = 1; int sysctl_tcp_app_win __read_mostly = 31; @@ -833,7 +834,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric, if (metric > tp->reordering) { int mib_idx; - tp->reordering = min(TCP_MAX_REORDERING, metric); + tp->reordering = min(sysctl_tcp_max_reordering, metric); /* This exciting event is worth to be remembered. 8) */ if (ts) @@ -2315,6 +2316,35 @@ static inline bool tcp_packet_delayed(const struct tcp_sock *tp) /* Undo procedures. */ +/* We can clear retrans_stamp when there are no retransmissions in the + * window. It would seem that it is trivially available for us in + * tp->retrans_out, however, that kind of assumptions doesn't consider + * what will happen if errors occur when sending retransmission for the + * second time. ...It could the that such segment has only + * TCPCB_EVER_RETRANS set at the present time. It seems that checking + * the head skb is enough except for some reneging corner cases that + * are not worth the effort. + * + * Main reason for all this complexity is the fact that connection dying + * time now depends on the validity of the retrans_stamp, in particular, + * that successive retransmissions of a segment must not advance + * retrans_stamp under any conditions. + */ +static bool tcp_any_retrans_done(const struct sock *sk) +{ + const struct tcp_sock *tp = tcp_sk(sk); + struct sk_buff *skb; + + if (tp->retrans_out) + return true; + + skb = tcp_write_queue_head(sk); + if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS)) + return true; + + return false; +} + #if FASTRETRANS_DEBUG > 1 static void DBGUNDO(struct sock *sk, const char *msg) { @@ -2410,6 +2440,8 @@ static bool tcp_try_undo_recovery(struct sock *sk) * is ACKed. For Reno it is MUST to prevent false * fast retransmits (RFC2582). SACK TCP is safe. */ tcp_moderate_cwnd(tp); + if (!tcp_any_retrans_done(sk)) + tp->retrans_stamp = 0; return true; } tcp_set_ca_state(sk, TCP_CA_Open); @@ -2430,35 +2462,6 @@ static bool tcp_try_undo_dsack(struct sock *sk) return false; } -/* We can clear retrans_stamp when there are no retransmissions in the - * window. It would seem that it is trivially available for us in - * tp->retrans_out, however, that kind of assumptions doesn't consider - * what will happen if errors occur when sending retransmission for the - * second time. ...It could the that such segment has only - * TCPCB_EVER_RETRANS set at the present time. It seems that checking - * the head skb is enough except for some reneging corner cases that - * are not worth the effort. - * - * Main reason for all this complexity is the fact that connection dying - * time now depends on the validity of the retrans_stamp, in particular, - * that successive retransmissions of a segment must not advance - * retrans_stamp under any conditions. 
- */ -static bool tcp_any_retrans_done(const struct sock *sk) -{ - const struct tcp_sock *tp = tcp_sk(sk); - struct sk_buff *skb; - - if (tp->retrans_out) - return true; - - skb = tcp_write_queue_head(sk); - if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS)) - return true; - - return false; -} - /* Undo during loss recovery after partial ACK or using F-RTO. */ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) { @@ -5028,7 +5031,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, /* step 3: check security and precedence [ignored] */ /* step 4: Check for a SYN - * RFC 5691 4.2 : Send a challenge ack + * RFC 5961 4.2 : Send a challenge ack */ if (th->syn) { syn_challenge: @@ -5851,12 +5854,12 @@ static inline void pr_drop_req(struct request_sock *req, __u16 port, int family) struct inet_request_sock *ireq = inet_rsk(req); if (family == AF_INET) - LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"), - &ireq->ir_rmt_addr, port); + net_dbg_ratelimited("drop open request from %pI4/%u\n", + &ireq->ir_rmt_addr, port); #if IS_ENABLED(CONFIG_IPV6) else if (family == AF_INET6) - LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI6/%u\n"), - &ireq->ir_v6_rmt_addr, port); + net_dbg_ratelimited("drop open request from %pI6/%u\n", + &ireq->ir_v6_rmt_addr, port); #endif } @@ -5865,7 +5868,7 @@ static inline void pr_drop_req(struct request_sock *req, __u16 port, int family) * If we receive a SYN packet with these bits set, it means a * network is playing bad games with TOS bits. In order to * avoid possible false congestion notifications, we disable - * TCP ECN negociation. + * TCP ECN negotiation. * * Exception: tcp_ca wants ECN. This is required for DCTCP * congestion control; it requires setting ECT on all packets, @@ -5875,20 +5878,22 @@ static inline void pr_drop_req(struct request_sock *req, __u16 port, int family) */ static void tcp_ecn_create_request(struct request_sock *req, const struct sk_buff *skb, - const struct sock *listen_sk) + const struct sock *listen_sk, + const struct dst_entry *dst) { const struct tcphdr *th = tcp_hdr(skb); const struct net *net = sock_net(listen_sk); bool th_ecn = th->ece && th->cwr; - bool ect, need_ecn; + bool ect, need_ecn, ecn_ok; if (!th_ecn) return; ect = !INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield); need_ecn = tcp_ca_needs_ecn(listen_sk); + ecn_ok = net->ipv4.sysctl_tcp_ecn || dst_feature(dst, RTAX_FEATURE_ECN); - if (!ect && !need_ecn && net->ipv4.sysctl_tcp_ecn) + if (!ect && !need_ecn && ecn_ok) inet_rsk(req)->ecn_ok = 1; else if (ect && need_ecn) inet_rsk(req)->ecn_ok = 1; @@ -5953,13 +5958,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, if (security_inet_conn_request(sk, skb, req)) goto drop_and_free; - if (!want_cookie || tmp_opt.tstamp_ok) - tcp_ecn_create_request(req, skb, sk); - - if (want_cookie) { - isn = cookie_init_sequence(af_ops, sk, skb, &req->mss); - req->cookie_ts = tmp_opt.tstamp_ok; - } else if (!isn) { + if (!want_cookie && !isn) { /* VJ's idea. 
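Both cookie_ecn_ok() earlier and the tcp_ecn_create_request() change above implement the same policy: a peer's ECN request is honoured if the sysctl enables ECN globally or the destination route carries the RTAX_FEATURE_ECN metric. A compact sketch of the common case, ignoring the ect/tcp_ca_needs_ecn special-casing visible above:

#include <stdio.h>
#include <stdbool.h>

static bool ecn_ok(bool th_ecn, bool sysctl_tcp_ecn, bool route_feature_ecn)
{
	if (!th_ecn)
		return false;		/* peer did not negotiate ECN */
	return sysctl_tcp_ecn || route_feature_ecn;
}

int main(void)
{
	printf("%d\n", ecn_ok(true, false, true));	/* 1: enabled per route */
	printf("%d\n", ecn_ok(true, false, false));	/* 0: nothing allows it */
	return 0;
}

The per-route knob lets an administrator turn ECN on for selected destinations without flipping the global sysctl.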
We save last timestamp seen * from the destination in peer table, when entering * state TIME-WAIT, and check against it before @@ -6007,6 +6006,15 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, goto drop_and_free; } + tcp_ecn_create_request(req, skb, sk, dst); + + if (want_cookie) { + isn = cookie_init_sequence(af_ops, sk, skb, &req->mss); + req->cookie_ts = tmp_opt.tstamp_ok; + if (!tmp_opt.tstamp_ok) + inet_rsk(req)->ecn_ok = 0; + } + tcp_rsk(req)->snt_isn = isn; tcp_openreq_init_rwin(req, sk, dst); fastopen = !want_cookie && diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 94d1a7757ff7..2c6a955fd5c3 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -206,8 +206,6 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) inet->inet_dport = usin->sin_port; inet->inet_daddr = daddr; - inet_set_txhash(sk); - inet_csk(sk)->icsk_ext_hdr_len = 0; if (inet_opt) inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen; @@ -224,6 +222,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) if (err) goto failure; + inet_set_txhash(sk); + rt = ip_route_newports(fl4, rt, orig_sport, orig_dport, inet->inet_sport, inet->inet_dport, sk); if (IS_ERR(rt)) { @@ -1429,6 +1429,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) struct dst_entry *dst = sk->sk_rx_dst; sock_rps_save_rxhash(sk, skb); + sk_mark_napi_id(sk, skb); if (dst) { if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || dst->ops->check(dst, 0) == NULL) { @@ -1450,6 +1451,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) if (nsk != sk) { sock_rps_save_rxhash(nsk, skb); + sk_mark_napi_id(sk, skb); if (tcp_child_process(sk, nsk, skb)) { rsk = nsk; goto reset; @@ -1661,7 +1663,7 @@ process: if (sk_filter(sk, skb)) goto discard_and_relse; - sk_mark_napi_id(sk, skb); + sk_incoming_cpu_update(sk); skb->dev = NULL; bh_lock_sock_nested(sk); diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index 5b90f2f447a5..9d7930ba8e0f 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -94,9 +94,9 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb, SKB_GSO_GRE_CSUM | SKB_GSO_IPIP | SKB_GSO_SIT | - SKB_GSO_MPLS | SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM | + SKB_GSO_TUNNEL_REMCSUM | 0) || !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))) goto out; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 3af21296d967..f5bd4bd3f7e6 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -333,10 +333,19 @@ static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb) static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); + bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 || + tcp_ca_needs_ecn(sk); + + if (!use_ecn) { + const struct dst_entry *dst = __sk_dst_get(sk); + + if (dst && dst_feature(dst, RTAX_FEATURE_ECN)) + use_ecn = true; + } tp->ecn_flags = 0; - if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 || - tcp_ca_needs_ecn(sk)) { + + if (use_ecn) { TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR; tp->ecn_flags = TCP_ECN_OK; if (tcp_ca_needs_ecn(sk)) @@ -1553,7 +1562,7 @@ static unsigned int tcp_mss_split_point(const struct sock *sk, static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp, const struct sk_buff *skb) { - u32 in_flight, cwnd; + u32 in_flight, cwnd, halfcwnd; /* Don't be strict about the congestion window for the final FIN. 
*/ if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) && @@ -1562,10 +1571,14 @@ static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp, in_flight = tcp_packets_in_flight(tp); cwnd = tp->snd_cwnd; - if (in_flight < cwnd) - return (cwnd - in_flight); + if (in_flight >= cwnd) + return 0; - return 0; + /* For better scheduling, ensure we have at least + * 2 GSO packets in flight. + */ + halfcwnd = max(cwnd >> 1, 1U); + return min(halfcwnd, cwnd - in_flight); } /* Initialize TSO state of a skb. @@ -2126,7 +2139,7 @@ bool tcp_schedule_loss_probe(struct sock *sk) static bool skb_still_in_host_queue(const struct sock *sk, const struct sk_buff *skb) { - if (unlikely(skb_fclone_busy(skb))) { + if (unlikely(skb_fclone_busy(sk, skb))) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES); return true; @@ -2998,9 +3011,9 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) { struct tcp_sock *tp = tcp_sk(sk); struct tcp_fastopen_request *fo = tp->fastopen_req; - int syn_loss = 0, space, i, err = 0, iovlen = fo->data->msg_iovlen; - struct sk_buff *syn_data = NULL, *data; + int syn_loss = 0, space, err = 0; unsigned long last_syn_loss = 0; + struct sk_buff *syn_data; tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */ tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie, @@ -3031,48 +3044,40 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) /* limit to order-0 allocations */ space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER)); - syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space, - sk->sk_allocation); - if (syn_data == NULL) + syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation); + if (!syn_data) + goto fallback; + syn_data->ip_summed = CHECKSUM_PARTIAL; + memcpy(syn_data->cb, syn->cb, sizeof(syn->cb)); + if (unlikely(memcpy_fromiovecend(skb_put(syn_data, space), + fo->data->msg_iov, 0, space))) { + kfree_skb(syn_data); goto fallback; + } - for (i = 0; i < iovlen && syn_data->len < space; ++i) { - struct iovec *iov = &fo->data->msg_iov[i]; - unsigned char __user *from = iov->iov_base; - int len = iov->iov_len; + /* No more data pending in inet_wait_for_connect() */ + if (space == fo->size) + fo->data = NULL; + fo->copied = space; - if (syn_data->len + len > space) - len = space - syn_data->len; - else if (i + 1 == iovlen) - /* No more data pending in inet_wait_for_connect() */ - fo->data = NULL; + tcp_connect_queue_skb(sk, syn_data); - if (skb_add_data(syn_data, from, len)) - goto fallback; - } + err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation); - /* Queue a data-only packet after the regular SYN for retransmission */ - data = pskb_copy(syn_data, sk->sk_allocation); - if (data == NULL) - goto fallback; - TCP_SKB_CB(data)->seq++; - TCP_SKB_CB(data)->tcp_flags &= ~TCPHDR_SYN; - TCP_SKB_CB(data)->tcp_flags = (TCPHDR_ACK|TCPHDR_PSH); - tcp_connect_queue_skb(sk, data); - fo->copied = data->len; - - /* syn_data is about to be sent, we need to take current time stamps - * for the packets that are in write queue : SYN packet and DATA - */ - skb_mstamp_get(&syn->skb_mstamp); - data->skb_mstamp = syn->skb_mstamp; + syn->skb_mstamp = syn_data->skb_mstamp; - if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) { + /* Now full SYN+DATA was cloned and sent (or not), + * remove the SYN from the original skb (syn_data) + * we keep in write queue in case of a retransmit, as we + * also have the SYN packet (with no data) in the same queue. 
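The tcp_cwnd_test() change above no longer grants the entire remaining window in one shot; it caps the per-call quota at max(cwnd/2, 1) packets so that at least two GSO-sized transmissions stay in flight and ACK clocking keeps the pipe busy. A userspace check of the new quota:

#include <stdio.h>

static unsigned int cwnd_quota(unsigned int in_flight, unsigned int cwnd)
{
	unsigned int halfcwnd;

	if (in_flight >= cwnd)
		return 0;

	halfcwnd = cwnd > 1 ? cwnd >> 1 : 1;	/* max(cwnd >> 1, 1U) */
	return halfcwnd < cwnd - in_flight ? halfcwnd : cwnd - in_flight;
}

int main(void)
{
	printf("%u\n", cwnd_quota(2, 10));	/* 5, not the full 8 */
	printf("%u\n", cwnd_quota(9, 10));	/* 1 */
	printf("%u\n", cwnd_quota(10, 10));	/* 0: window is full */
	return 0;
}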
+ */ + TCP_SKB_CB(syn_data)->seq++; + TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH; + if (!err) { tp->syn_data = (fo->copied > 0); NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT); goto done; } - syn_data = NULL; fallback: /* Send a regular SYN with Fast Open cookie request option */ @@ -3081,7 +3086,6 @@ fallback: err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation); if (err) tp->syn_fastopen = 0; - kfree_skb(syn_data); done: fo->cookie.len = -1; /* Exclude Fast Open option for SYN retries */ return err; @@ -3101,13 +3105,10 @@ int tcp_connect(struct sock *sk) return 0; } - buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation); - if (unlikely(buff == NULL)) + buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation); + if (unlikely(!buff)) return -ENOBUFS; - /* Reserve space for headers. */ - skb_reserve(buff, MAX_TCP_HEADER); - tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); tp->retrans_stamp = tcp_time_stamp; tcp_connect_queue_skb(sk, buff); diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 9b21ae8b2e31..1829c7fbc77e 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -374,17 +374,19 @@ void tcp_retransmit_timer(struct sock *sk) */ struct inet_sock *inet = inet_sk(sk); if (sk->sk_family == AF_INET) { - LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n"), - &inet->inet_daddr, - ntohs(inet->inet_dport), inet->inet_num, - tp->snd_una, tp->snd_nxt); + net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n", + &inet->inet_daddr, + ntohs(inet->inet_dport), + inet->inet_num, + tp->snd_una, tp->snd_nxt); } #if IS_ENABLED(CONFIG_IPV6) else if (sk->sk_family == AF_INET6) { - LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n"), - &sk->sk_v6_daddr, - ntohs(inet->inet_dport), inet->inet_num, - tp->snd_una, tp->snd_nxt); + net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n", + &sk->sk_v6_daddr, + ntohs(inet->inet_dport), + inet->inet_num, + tp->snd_una, tp->snd_nxt); } #endif if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) { diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index cd0db5471bb5..4a16b9129079 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -144,7 +144,7 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num, struct hlist_nulls_node *node; kuid_t uid = sock_i_uid(sk); - sk_nulls_for_each(sk2, node, &hslot->head) + sk_nulls_for_each(sk2, node, &hslot->head) { if (net_eq(sock_net(sk2), net) && sk2 != sk && (bitmap || udp_sk(sk2)->udp_port_hash == num) && @@ -152,14 +152,13 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num, (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && (!sk2->sk_reuseport || !sk->sk_reuseport || - !uid_eq(uid, sock_i_uid(sk2))) && - (*saddr_comp)(sk, sk2)) { - if (bitmap) - __set_bit(udp_sk(sk2)->udp_port_hash >> log, - bitmap); - else + !uid_eq(uid, sock_i_uid(sk2))) && + saddr_comp(sk, sk2)) { + if (!bitmap) return 1; + __set_bit(udp_sk(sk2)->udp_port_hash >> log, bitmap); } + } return 0; } @@ -168,10 +167,10 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num, * can insert/delete a socket with local_port == num */ static int udp_lib_lport_inuse2(struct net *net, __u16 num, - struct udp_hslot *hslot2, - struct sock *sk, - int (*saddr_comp)(const struct sock *sk1, - const struct sock *sk2)) + struct udp_hslot *hslot2, + struct sock *sk, + int (*saddr_comp)(const struct sock *sk1, + const 
struct sock *sk2)) { struct sock *sk2; struct hlist_nulls_node *node; @@ -179,7 +178,7 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num, int res = 0; spin_lock(&hslot2->lock); - udp_portaddr_for_each_entry(sk2, node, &hslot2->head) + udp_portaddr_for_each_entry(sk2, node, &hslot2->head) { if (net_eq(sock_net(sk2), net) && sk2 != sk && (udp_sk(sk2)->udp_port_hash == num) && @@ -187,11 +186,12 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num, (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && (!sk2->sk_reuseport || !sk->sk_reuseport || - !uid_eq(uid, sock_i_uid(sk2))) && - (*saddr_comp)(sk, sk2)) { + !uid_eq(uid, sock_i_uid(sk2))) && + saddr_comp(sk, sk2)) { res = 1; break; } + } spin_unlock(&hslot2->lock); return res; } @@ -206,8 +206,8 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num, * with NULL address */ int udp_lib_get_port(struct sock *sk, unsigned short snum, - int (*saddr_comp)(const struct sock *sk1, - const struct sock *sk2), + int (*saddr_comp)(const struct sock *sk1, + const struct sock *sk2), unsigned int hash2_nulladdr) { struct udp_hslot *hslot, *hslot2; @@ -1051,7 +1051,7 @@ back_from_confirm: /* ... which is an evident application bug. --ANK */ release_sock(sk); - LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("cork app bug 2\n")); + net_dbg_ratelimited("cork app bug 2\n"); err = -EINVAL; goto out; } @@ -1133,7 +1133,7 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset, if (unlikely(!up->pending)) { release_sock(sk); - LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("udp cork app bug 3\n")); + net_dbg_ratelimited("udp cork app bug 3\n"); return -EINVAL; } @@ -1281,8 +1281,8 @@ try_again: } if (skb_csum_unnecessary(skb)) - err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), - msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, sizeof(struct udphdr), + msg, copied); else { err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), @@ -1445,6 +1445,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) if (inet_sk(sk)->inet_daddr) { sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); + sk_incoming_cpu_update(sk); } rc = sock_queue_rcv_skb(sk, skb); @@ -1546,8 +1547,8 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) * provided by the application." */ if (up->pcrlen == 0) { /* full coverage was set */ - LIMIT_NETDEBUG(KERN_WARNING "UDPLite: partial coverage %d while full coverage %d requested\n", - UDP_SKB_CB(skb)->cscov, skb->len); + net_dbg_ratelimited("UDPLite: partial coverage %d while full coverage %d requested\n", + UDP_SKB_CB(skb)->cscov, skb->len); goto drop; } /* The next case involves violating the min. coverage requested @@ -1557,8 +1558,8 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) * Therefore the above ...()->partial_cov statement is essential. 
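The ratelimited messages above guard the two UDP-Lite coverage rules from RFC 3828: a socket that requested full coverage (pcrlen == 0) must drop partially covered datagrams, and otherwise the sender's coverage must reach the receiver's configured minimum. A sketch of the accept/drop decision:

#include <stdio.h>
#include <stdbool.h>

static bool udplite_cov_ok(bool partial_cov, unsigned int cscov,
			   unsigned int pcrlen)
{
	if (!partial_cov)
		return true;		/* checksum covered the whole packet */
	if (pcrlen == 0)
		return false;		/* socket demanded full coverage */
	return cscov >= pcrlen;		/* sender met the receiver's minimum */
}

int main(void)
{
	printf("%d\n", udplite_cov_ok(true, 20, 0));	/* 0: drop */
	printf("%d\n", udplite_cov_ok(true, 20, 16));	/* 1: accept */
	printf("%d\n", udplite_cov_ok(true, 8, 16));	/* 0: drop */
	return 0;
}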
*/ if (UDP_SKB_CB(skb)->cscov < up->pcrlen) { - LIMIT_NETDEBUG(KERN_WARNING "UDPLite: coverage %d too small, need min %d\n", - UDP_SKB_CB(skb)->cscov, up->pcrlen); + net_dbg_ratelimited("UDPLite: coverage %d too small, need min %d\n", + UDP_SKB_CB(skb)->cscov, up->pcrlen); goto drop; } } @@ -1647,7 +1648,8 @@ static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, struct udphdr *uh, __be32 saddr, __be32 daddr, - struct udp_table *udptable) + struct udp_table *udptable, + int proto) { struct sock *sk, *stack[256 / sizeof(struct sock *)]; struct hlist_nulls_node *node; @@ -1656,6 +1658,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, int dif = skb->dev->ifindex; unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node); unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10); + bool inner_flushed = false; if (use_hash2) { hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) & @@ -1674,6 +1677,7 @@ start_lookup: dif, hnum)) { if (unlikely(count == ARRAY_SIZE(stack))) { flush_stack(stack, count, skb, ~0); + inner_flushed = true; count = 0; } stack[count++] = sk; @@ -1695,7 +1699,10 @@ start_lookup: if (count) { flush_stack(stack, count, skb, count - 1); } else { - kfree_skb(skb); + if (!inner_flushed) + UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI, + proto == IPPROTO_UDPLITE); + consume_skb(skb); } return 0; } @@ -1777,14 +1784,13 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, if (ret > 0) return -ret; return 0; - } else { - if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) - return __udp4_lib_mcast_deliver(net, skb, uh, - saddr, daddr, udptable); - - sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); } + if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) + return __udp4_lib_mcast_deliver(net, skb, uh, + saddr, daddr, udptable, proto); + + sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable); if (sk != NULL) { int ret; @@ -1822,11 +1828,11 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, return 0; short_packet: - LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n", - proto == IPPROTO_UDPLITE ? "Lite" : "", - &saddr, ntohs(uh->source), - ulen, skb->len, - &daddr, ntohs(uh->dest)); + net_dbg_ratelimited("UDP%s: short packet: From %pI4:%u %d/%d to %pI4:%u\n", + proto == IPPROTO_UDPLITE ? "Lite" : "", + &saddr, ntohs(uh->source), + ulen, skb->len, + &daddr, ntohs(uh->dest)); goto drop; csum_error: @@ -1834,10 +1840,10 @@ csum_error: * RFC1122: OK. Discards the bad packet silently (as far as * the network is concerned, anyway) as per 4.1.3.4 (MUST). */ - LIMIT_NETDEBUG(KERN_DEBUG "UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n", - proto == IPPROTO_UDPLITE ? "Lite" : "", - &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest), - ulen); + net_dbg_ratelimited("UDP%s: bad checksum. From %pI4:%u to %pI4:%u ulen %d\n", + proto == IPPROTO_UDPLITE ? 
"Lite" : "", + &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest), + ulen); UDP_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); drop: UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); @@ -2027,7 +2033,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname, } else { up->corkflag = 0; lock_sock(sk); - (*push_pending_frames)(sk); + push_pending_frames(sk); release_sock(sk); } break; diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 507310ef4b56..d3e537ef6b7f 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -29,7 +29,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, netdev_features_t features, struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb, netdev_features_t features), - __be16 new_protocol) + __be16 new_protocol, bool is_ipv6) { struct sk_buff *segs = ERR_PTR(-EINVAL); u16 mac_offset = skb->mac_header; @@ -39,7 +39,10 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, netdev_features_t enc_features; int udp_offset, outer_hlen; unsigned int oldlen; - bool need_csum; + bool need_csum = !!(skb_shinfo(skb)->gso_type & + SKB_GSO_UDP_TUNNEL_CSUM); + bool remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM); + bool offload_csum = false, dont_encap = (need_csum || remcsum); oldlen = (u16)~skb->len; @@ -52,13 +55,16 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, skb_set_network_header(skb, skb_inner_network_offset(skb)); skb->mac_len = skb_inner_network_offset(skb); skb->protocol = new_protocol; + skb->encap_hdr_csum = need_csum; + skb->remcsum_offload = remcsum; - need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM); - if (need_csum) - skb->encap_hdr_csum = 1; + /* Try to offload checksum if possible */ + offload_csum = !!(need_csum && + (skb->dev->features & + (is_ipv6 ? NETIF_F_V6_CSUM : NETIF_F_V4_CSUM))); /* segment inner packet. */ - enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); + enc_features = skb->dev->hw_enc_features & features; segs = gso_inner_segment(skb, enc_features); if (IS_ERR_OR_NULL(segs)) { skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset, @@ -72,11 +78,21 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, do { struct udphdr *uh; int len; - - skb_reset_inner_headers(skb); - skb->encapsulation = 1; + __be32 delta; + + if (dont_encap) { + skb->encapsulation = 0; + skb->ip_summed = CHECKSUM_NONE; + } else { + /* Only set up inner headers if we might be offloading + * inner checksum. + */ + skb_reset_inner_headers(skb); + skb->encapsulation = 1; + } skb->mac_len = mac_len; + skb->protocol = protocol; skb_push(skb, outer_hlen); skb_reset_mac_header(skb); @@ -86,19 +102,36 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, uh = udp_hdr(skb); uh->len = htons(len); - if (need_csum) { - __be32 delta = htonl(oldlen + len); + if (!need_csum) + continue; - uh->check = ~csum_fold((__force __wsum) - ((__force u32)uh->check + - (__force u32)delta)); + delta = htonl(oldlen + len); + + uh->check = ~csum_fold((__force __wsum) + ((__force u32)uh->check + + (__force u32)delta)); + if (offload_csum) { + skb->ip_summed = CHECKSUM_PARTIAL; + skb->csum_start = skb_transport_header(skb) - skb->head; + skb->csum_offset = offsetof(struct udphdr, check); + } else if (remcsum) { + /* Need to calculate checksum from scratch, + * inner checksums are never when doing + * remote_checksum_offload. 
+ */ + + skb->csum = skb_checksum(skb, udp_offset, + skb->len - udp_offset, + 0); + uh->check = csum_fold(skb->csum); + if (uh->check == 0) + uh->check = CSUM_MANGLED_0; + } else { uh->check = gso_make_checksum(skb, ~uh->check); if (uh->check == 0) uh->check = CSUM_MANGLED_0; } - - skb->protocol = protocol; } while ((skb = skb->next)); out: return segs; @@ -134,7 +167,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb, } segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment, - protocol); + protocol, is_ipv6); out_unlock: rcu_read_unlock(); @@ -172,9 +205,9 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY | SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM | + SKB_GSO_TUNNEL_REMCSUM | SKB_GSO_IPIP | - SKB_GSO_GRE | SKB_GSO_GRE_CSUM | - SKB_GSO_MPLS) || + SKB_GSO_GRE | SKB_GSO_GRE_CSUM) || !(type & (SKB_GSO_UDP)))) goto out; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 725c763270a0..251fcb48b216 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1170,6 +1170,9 @@ enum { IPV6_SADDR_RULE_PRIVACY, IPV6_SADDR_RULE_ORCHID, IPV6_SADDR_RULE_PREFIX, +#ifdef CONFIG_IPV6_OPTIMISTIC_DAD + IPV6_SADDR_RULE_NOT_OPTIMISTIC, +#endif IPV6_SADDR_RULE_MAX }; @@ -1197,6 +1200,15 @@ static inline int ipv6_saddr_preferred(int type) return 0; } +static inline bool ipv6_use_optimistic_addr(struct inet6_dev *idev) +{ +#ifdef CONFIG_IPV6_OPTIMISTIC_DAD + return idev && idev->cnf.optimistic_dad && idev->cnf.use_optimistic; +#else + return false; +#endif +} + static int ipv6_get_saddr_eval(struct net *net, struct ipv6_saddr_score *score, struct ipv6_saddr_dst *dst, @@ -1257,10 +1269,16 @@ static int ipv6_get_saddr_eval(struct net *net, score->scopedist = ret; break; case IPV6_SADDR_RULE_PREFERRED: + { /* Rule 3: Avoid deprecated and optimistic addresses */ + u8 avoid = IFA_F_DEPRECATED; + + if (!ipv6_use_optimistic_addr(score->ifa->idev)) + avoid |= IFA_F_OPTIMISTIC; ret = ipv6_saddr_preferred(score->addr_type) || - !(score->ifa->flags & (IFA_F_DEPRECATED|IFA_F_OPTIMISTIC)); + !(score->ifa->flags & avoid); break; + } #ifdef CONFIG_IPV6_MIP6 case IPV6_SADDR_RULE_HOA: { @@ -1306,6 +1324,14 @@ static int ipv6_get_saddr_eval(struct net *net, ret = score->ifa->prefix_len; score->matchlen = ret; break; +#ifdef CONFIG_IPV6_OPTIMISTIC_DAD + case IPV6_SADDR_RULE_NOT_OPTIMISTIC: + /* Optimistic addresses still have lower precedence than other + * preferred addresses. 
+ */ + ret = !(score->ifa->flags & IFA_F_OPTIMISTIC); + break; +#endif default: ret = 0; } @@ -1385,10 +1411,8 @@ int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev, if (unlikely(score->addr_type == IPV6_ADDR_ANY || score->addr_type & IPV6_ADDR_MULTICAST)) { - LIMIT_NETDEBUG(KERN_DEBUG - "ADDRCONF: unspecified / multicast address " - "assigned as unicast address on %s", - dev->name); + net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s", + dev->name); continue; } @@ -2315,8 +2339,8 @@ ok: else stored_lft = 0; if (!update_lft && !create && stored_lft) { - const u32 minimum_lft = min( - stored_lft, (u32)MIN_VALID_LIFETIME); + const u32 minimum_lft = min_t(u32, + stored_lft, MIN_VALID_LIFETIME); valid_lft = max(valid_lft, minimum_lft); /* RFC4862 Section 5.5.3e: @@ -3222,8 +3246,15 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp) * Optimistic nodes can start receiving * Frames right away */ - if (ifp->flags & IFA_F_OPTIMISTIC) + if (ifp->flags & IFA_F_OPTIMISTIC) { ip6_ins_rt(ifp->rt); + if (ipv6_use_optimistic_addr(idev)) { + /* Because optimistic nodes can use this address, + * notify listeners. If DAD fails, RTM_DELADDR is sent. + */ + ipv6_ifa_notify(RTM_NEWADDR, ifp); + } + } addrconf_dad_kick(ifp); out: @@ -4330,6 +4361,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route; #ifdef CONFIG_IPV6_OPTIMISTIC_DAD array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad; + array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic; #endif #ifdef CONFIG_IPV6_MROUTE array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding; @@ -4531,6 +4563,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token) } write_unlock_bh(&idev->lock); + inet6_ifinfo_notify(RTM_NEWLINK, idev); addrconf_verify_rtnl(); return 0; } @@ -5155,6 +5188,14 @@ static struct addrconf_sysctl_table .proc_handler = proc_dointvec, }, + { + .procname = "use_optimistic", + .data = &ipv6_devconf.use_optimistic, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + + }, #endif #ifdef CONFIG_IPV6_MROUTE { diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index 6d16eb0e0c7f..8ab1989198f6 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c @@ -272,10 +272,9 @@ static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir) ipv6_rearrange_destopt(iph, exthdr.opth); case NEXTHDR_HOP: if (!zero_out_mutable_opts(exthdr.opth)) { - LIMIT_NETDEBUG( - KERN_WARNING "overrun %sopts\n", - nexthdr == NEXTHDR_HOP ? - "hop" : "dest"); + net_dbg_ratelimited("overrun %sopts\n", + nexthdr == NEXTHDR_HOP ? 
+ "hop" : "dest"); return -EINVAL; } break; diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 2cdc38338be3..cc1139687fd7 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -351,7 +351,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) msg->msg_flags |= MSG_TRUNC; copied = len; } - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto out_free_skb; @@ -445,7 +445,7 @@ int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len, msg->msg_flags |= MSG_TRUNC; copied = len; } - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto out_free_skb; @@ -893,8 +893,8 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk, break; } default: - LIMIT_NETDEBUG(KERN_DEBUG "invalid cmsg type: %d\n", - cmsg->cmsg_type); + net_dbg_ratelimited("invalid cmsg type: %d\n", + cmsg->cmsg_type); err = -EINVAL; goto exit_f; } diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 83fc3a385a26..d2c2d749b6db 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -286,8 +286,8 @@ static int esp_input_done2(struct sk_buff *skb, int err) err = -EINVAL; padlen = nexthdr[0]; if (padlen + 2 + alen >= elen) { - LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage " - "padlen=%d, elen=%d\n", padlen + 2, elen - alen); + net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n", + padlen + 2, elen - alen); goto out; } @@ -544,12 +544,12 @@ static int esp_init_authenc(struct xfrm_state *x) BUG_ON(!aalg_desc); err = -EINVAL; - if (aalg_desc->uinfo.auth.icv_fullbits/8 != + if (aalg_desc->uinfo.auth.icv_fullbits / 8 != crypto_aead_authsize(aead)) { - NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n", - x->aalg->alg_name, - crypto_aead_authsize(aead), - aalg_desc->uinfo.auth.icv_fullbits/8); + pr_info("ESP: %s digestsize %u != %hu\n", + x->aalg->alg_name, + crypto_aead_authsize(aead), + aalg_desc->uinfo.auth.icv_fullbits / 8); goto free_key; } diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c index bfde361b6134..a7bbbe45570b 100644 --- a/net/ipv6/exthdrs.c +++ b/net/ipv6/exthdrs.c @@ -47,7 +47,7 @@ #include <net/xfrm.h> #endif -#include <asm/uaccess.h> +#include <linux/uaccess.h> /* * Parsing tlv encoded headers. 
@@ -184,7 +184,7 @@ static bool ipv6_dest_hao(struct sk_buff *skb, int optoff) int ret; if (opt->dsthao) { - LIMIT_NETDEBUG(KERN_DEBUG "hao duplicated\n"); + net_dbg_ratelimited("hao duplicated\n"); goto discard; } opt->dsthao = opt->dst1; @@ -193,14 +193,14 @@ static bool ipv6_dest_hao(struct sk_buff *skb, int optoff) hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff); if (hao->length != 16) { - LIMIT_NETDEBUG( - KERN_DEBUG "hao invalid option length = %d\n", hao->length); + net_dbg_ratelimited("hao invalid option length = %d\n", + hao->length); goto discard; } if (!(ipv6_addr_type(&hao->addr) & IPV6_ADDR_UNICAST)) { - LIMIT_NETDEBUG( - KERN_DEBUG "hao is not an unicast addr: %pI6\n", &hao->addr); + net_dbg_ratelimited("hao is not an unicast addr: %pI6\n", + &hao->addr); goto discard; } @@ -551,8 +551,8 @@ static bool ipv6_hop_ra(struct sk_buff *skb, int optoff) memcpy(&IP6CB(skb)->ra, nh + optoff + 2, sizeof(IP6CB(skb)->ra)); return true; } - LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_ra: wrong RA length %d\n", - nh[optoff + 1]); + net_dbg_ratelimited("ipv6_hop_ra: wrong RA length %d\n", + nh[optoff + 1]); kfree_skb(skb); return false; } @@ -566,8 +566,8 @@ static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff) u32 pkt_len; if (nh[optoff + 1] != 4 || (optoff & 3) != 2) { - LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n", - nh[optoff+1]); + net_dbg_ratelimited("ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n", + nh[optoff+1]); IP6_INC_STATS_BH(net, ipv6_skb_idev(skb), IPSTATS_MIB_INHDRERRORS); goto drop; diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 97ae70077a4f..39b3ff97a504 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -338,7 +338,7 @@ static struct dst_entry *icmpv6_route_lookup(struct net *net, * anycast. */ if (((struct rt6_info *)dst)->rt6i_flags & RTF_ANYCAST) { - LIMIT_NETDEBUG(KERN_DEBUG "icmp6_send: acast source\n"); + net_dbg_ratelimited("icmp6_send: acast source\n"); dst_release(dst); return ERR_PTR(-EINVAL); } @@ -452,7 +452,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) * and anycast addresses will be checked later. */ if ((addr_type == IPV6_ADDR_ANY) || (addr_type & IPV6_ADDR_MULTICAST)) { - LIMIT_NETDEBUG(KERN_DEBUG "icmp6_send: addr_any/mcast source\n"); + net_dbg_ratelimited("icmp6_send: addr_any/mcast source\n"); return; } @@ -460,7 +460,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) * Never answer to a ICMP packet. 
*/ if (is_ineligible(skb)) { - LIMIT_NETDEBUG(KERN_DEBUG "icmp6_send: no reply to icmp error\n"); + net_dbg_ratelimited("icmp6_send: no reply to icmp error\n"); return; } @@ -509,7 +509,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) len = skb->len - msg.offset; len = min_t(unsigned int, len, IPV6_MIN_MTU - sizeof(struct ipv6hdr) - sizeof(struct icmp6hdr)); if (len < 0) { - LIMIT_NETDEBUG(KERN_DEBUG "icmp: len problem\n"); + net_dbg_ratelimited("icmp: len problem\n"); goto out_dst_release; } @@ -679,6 +679,7 @@ static int icmpv6_rcv(struct sk_buff *skb) const struct in6_addr *saddr, *daddr; struct icmp6hdr *hdr; u8 type; + bool success = false; if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { struct sec_path *sp = skb_sec_path(skb); @@ -706,9 +707,8 @@ static int icmpv6_rcv(struct sk_buff *skb) daddr = &ipv6_hdr(skb)->daddr; if (skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo)) { - LIMIT_NETDEBUG(KERN_DEBUG - "ICMPv6 checksum failed [%pI6c > %pI6c]\n", - saddr, daddr); + net_dbg_ratelimited("ICMPv6 checksum failed [%pI6c > %pI6c]\n", + saddr, daddr); goto csum_error; } @@ -727,7 +727,7 @@ static int icmpv6_rcv(struct sk_buff *skb) break; case ICMPV6_ECHO_REPLY: - ping_rcv(skb); + success = ping_rcv(skb); break; case ICMPV6_PKT_TOOBIG: @@ -781,7 +781,7 @@ static int icmpv6_rcv(struct sk_buff *skb) if (type & ICMPV6_INFOMSG_MASK) break; - LIMIT_NETDEBUG(KERN_DEBUG "icmpv6: msg of unknown type\n"); + net_dbg_ratelimited("icmpv6: msg of unknown type\n"); /* * error of unknown type. @@ -791,7 +791,14 @@ static int icmpv6_rcv(struct sk_buff *skb) icmpv6_notify(skb, type, hdr->icmp6_code, hdr->icmp6_mtu); } - kfree_skb(skb); + /* until the v6 path can be better sorted assume failure and + * preserve the status quo behaviour for the rest of the paths to here + */ + if (success) + consume_skb(skb); + else + kfree_skb(skb); + return 0; csum_error: @@ -1009,4 +1016,3 @@ struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net) return table; } #endif - diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index 3dd7d4ebd7cd..7221021b2d97 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -769,10 +769,9 @@ static void ip6fl_seq_stop(struct seq_file *seq, void *v) static int ip6fl_seq_show(struct seq_file *seq, void *v) { struct ip6fl_iter_state *state = ip6fl_seq_private(seq); - if (v == SEQ_START_TOKEN) - seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-32s %s\n", - "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt"); - else { + if (v == SEQ_START_TOKEN) { + seq_puts(seq, "Label S Owner Users Linger Expires Dst Opt\n"); + } else { struct ip6_flowlabel *fl = v; seq_printf(seq, "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n", diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 12c3c8ef3849..f6e2533c1145 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -902,7 +902,7 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb, struct net_device_stats *stats = &t->dev->stats; int ret; - if (!ip6_tnl_xmit_ctl(t)) + if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr)) goto tx_err; switch (skb->protocol) { @@ -961,8 +961,6 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu) else dev->flags &= ~IFF_POINTOPOINT; - dev->iflink = p->link; - /* Precalculate GRE options length */ if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) { if (t->parms.o_flags&GRE_CSUM) @@ -1272,6 +1270,7 @@ static int ip6gre_tunnel_init(struct net_device *dev) 
u64_stats_init(&ip6gre_tunnel_stats->syncp); } + dev->iflink = tunnel->parms.link; return 0; } @@ -1481,6 +1480,8 @@ static int ip6gre_tap_init(struct net_device *dev) if (!dev->tstats) return -ENOMEM; + dev->iflink = tunnel->parms.link; + return 0; } diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 91014d32488d..fd76ce938c32 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -78,7 +78,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, SKB_GSO_SIT | SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM | - SKB_GSO_MPLS | + SKB_GSO_TUNNEL_REMCSUM | SKB_GSO_TCPV6 | 0))) goto out; @@ -90,7 +90,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, encap = SKB_GSO_CB(skb)->encap_level > 0; if (encap) - features = skb->dev->hw_enc_features & netif_skb_features(skb); + features &= skb->dev->hw_enc_features; SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h); ipv6h = ipv6_hdr(skb); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 8e950c250ada..916d2a166a9b 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -747,13 +747,11 @@ slow_path: if (len < left) { len &= ~7; } - /* - * Allocate buffer. - */ - if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) + - hroom + troom, GFP_ATOMIC)) == NULL) { - NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n"); + /* Allocate buffer */ + frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) + + hroom + troom, GFP_ATOMIC); + if (!frag) { IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS); err = -ENOMEM; diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 9409887fb664..e2b6cfba873c 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -183,6 +183,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_ unsigned int hash = HASH(remote, local); struct ip6_tnl *t; struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); + struct in6_addr any; for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) { if (ipv6_addr_equal(local, &t->parms.laddr) && @@ -190,6 +191,22 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_ (t->dev->flags & IFF_UP)) return t; } + + memset(&any, 0, sizeof(any)); + hash = HASH(&any, local); + for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) { + if (ipv6_addr_equal(local, &t->parms.laddr) && + (t->dev->flags & IFF_UP)) + return t; + } + + hash = HASH(remote, &any); + for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) { + if (ipv6_addr_equal(remote, &t->parms.raddr) && + (t->dev->flags & IFF_UP)) + return t; + } + t = rcu_dereference(ip6n->tnls_wc[0]); if (t && (t->dev->flags & IFF_UP)) return t; @@ -272,9 +289,6 @@ static int ip6_tnl_create2(struct net_device *dev) int err; t = netdev_priv(dev); - err = ip6_tnl_dev_init(dev); - if (err < 0) - goto out; err = register_netdevice(dev); if (err < 0) @@ -477,6 +491,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, int rel_msg = 0; u8 rel_type = ICMPV6_DEST_UNREACH; u8 rel_code = ICMPV6_ADDR_UNREACH; + u8 tproto; __u32 rel_info = 0; __u16 len; int err = -ENOENT; @@ -490,7 +505,8 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, &ipv6h->saddr)) == NULL) goto out; - if (t->parms.proto != ipproto && t->parms.proto != 0) + tproto = ACCESS_ONCE(t->parms.proto); + if (tproto != ipproto && tproto != 0) goto out; err = 0; @@ -791,6 +807,7 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, { struct ip6_tnl *t; const struct ipv6hdr *ipv6h = ipv6_hdr(skb); + 
u8 tproto; int err; rcu_read_lock(); @@ -799,7 +816,8 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, &ipv6h->daddr)) != NULL) { struct pcpu_sw_netstats *tstats; - if (t->parms.proto != ipproto && t->parms.proto != 0) { + tproto = ACCESS_ONCE(t->parms.proto); + if (tproto != ipproto && tproto != 0) { rcu_read_unlock(); goto discard; } @@ -905,24 +923,28 @@ ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr) return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr); } -int ip6_tnl_xmit_ctl(struct ip6_tnl *t) +int ip6_tnl_xmit_ctl(struct ip6_tnl *t, + const struct in6_addr *laddr, + const struct in6_addr *raddr) { struct __ip6_tnl_parm *p = &t->parms; int ret = 0; struct net *net = t->net; - if (p->flags & IP6_TNL_F_CAP_XMIT) { + if ((p->flags & IP6_TNL_F_CAP_XMIT) || + ((p->flags & IP6_TNL_F_CAP_PER_PACKET) && + (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) { struct net_device *ldev = NULL; rcu_read_lock(); if (p->link) ldev = dev_get_by_index_rcu(net, p->link); - if (unlikely(!ipv6_chk_addr(net, &p->laddr, ldev, 0))) + if (unlikely(!ipv6_chk_addr(net, laddr, ldev, 0))) pr_warn("%s xmit: Local address not yet configured!\n", p->name); - else if (!ipv6_addr_is_multicast(&p->raddr) && - unlikely(ipv6_chk_addr(net, &p->raddr, NULL, 0))) + else if (!ipv6_addr_is_multicast(raddr) && + unlikely(ipv6_chk_addr(net, raddr, NULL, 0))) pr_warn("%s xmit: Routing loop! Remote address found on this node!\n", p->name); else @@ -971,8 +993,34 @@ static int ip6_tnl_xmit2(struct sk_buff *skb, u8 proto; int err = -1; - if (!fl6->flowi6_mark) + /* NBMA tunnel */ + if (ipv6_addr_any(&t->parms.raddr)) { + struct in6_addr *addr6; + struct neighbour *neigh; + int addr_type; + + if (!skb_dst(skb)) + goto tx_err_link_failure; + + neigh = dst_neigh_lookup(skb_dst(skb), + &ipv6_hdr(skb)->daddr); + if (!neigh) + goto tx_err_link_failure; + + addr6 = (struct in6_addr *)&neigh->primary_key; + addr_type = ipv6_addr_type(addr6); + + if (addr_type == IPV6_ADDR_ANY) + addr6 = &ipv6_hdr(skb)->daddr; + + memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); + neigh_release(neigh); + } else if (!fl6->flowi6_mark) dst = ip6_tnl_dst_check(t); + + if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr)) + goto tx_err_link_failure; + if (!dst) { ndst = ip6_route_output(net, NULL, fl6); @@ -1078,10 +1126,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) struct flowi6 fl6; __u8 dsfield; __u32 mtu; + u8 tproto; int err; - if ((t->parms.proto != IPPROTO_IPIP && t->parms.proto != 0) || - !ip6_tnl_xmit_ctl(t)) + tproto = ACCESS_ONCE(t->parms.proto); + if (tproto != IPPROTO_IPIP && tproto != 0) return -1; if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) @@ -1120,10 +1169,12 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) struct flowi6 fl6; __u8 dsfield; __u32 mtu; + u8 tproto; int err; - if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) || - !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h)) + tproto = ACCESS_ONCE(t->parms.proto); + if ((tproto != IPPROTO_IPV6 && tproto != 0) || + ip6_tnl_addr_conflict(t, ipv6h)) return -1; offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); @@ -1285,6 +1336,14 @@ static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p) return err; } +static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p) +{ + /* for default tnl0 device allow to change only the proto */ + t->parms.proto = p->proto; + netdev_state_change(t->dev); + return 0; +} + static void ip6_tnl_parm_from_user(struct 
__ip6_tnl_parm *p, const struct ip6_tnl_parm *u) { @@ -1384,7 +1443,7 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) break; ip6_tnl_parm_from_user(&p1, &p); t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL); - if (dev != ip6n->fb_tnl_dev && cmd == SIOCCHGTUNNEL) { + if (cmd == SIOCCHGTUNNEL) { if (t != NULL) { if (t->dev != dev) { err = -EEXIST; @@ -1392,8 +1451,10 @@ ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) } } else t = netdev_priv(dev); - - err = ip6_tnl_update(t, &p1); + if (dev == ip6n->fb_tnl_dev) + err = ip6_tnl0_update(t, &p1); + else + err = ip6_tnl_update(t, &p1); } if (t) { err = 0; @@ -1462,6 +1523,7 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) static const struct net_device_ops ip6_tnl_netdev_ops = { + .ndo_init = ip6_tnl_dev_init, .ndo_uninit = ip6_tnl_dev_uninit, .ndo_start_xmit = ip6_tnl_xmit, .ndo_do_ioctl = ip6_tnl_ioctl, @@ -1546,16 +1608,10 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev) struct ip6_tnl *t = netdev_priv(dev); struct net *net = dev_net(dev); struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); - int err = ip6_tnl_dev_init_gen(dev); - - if (err) - return err; t->parms.proto = IPPROTO_IPV6; dev_hold(dev); - ip6_tnl_link_config(t); - rcu_assign_pointer(ip6n->tnls_wc[0], t); return 0; } diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index d440bb585524..ec84d03491c7 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -172,10 +172,6 @@ static int vti6_tnl_create2(struct net_device *dev) struct vti6_net *ip6n = net_generic(net, vti6_net_id); int err; - err = vti6_dev_init(dev); - if (err < 0) - goto out; - err = register_netdevice(dev); if (err < 0) goto out; @@ -416,6 +412,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) struct net_device_stats *stats = &t->dev->stats; struct dst_entry *dst = skb_dst(skb); struct net_device *tdev; + struct xfrm_state *x; int err = -1; if (!dst) @@ -429,7 +426,12 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) goto tx_err_link_failure; } - if (!vti6_state_check(dst->xfrm, &t->parms.raddr, &t->parms.laddr)) + x = dst->xfrm; + if (!vti6_state_check(x, &t->parms.raddr, &t->parms.laddr)) + goto tx_err_link_failure; + + if (!ip6_tnl_xmit_ctl(t, (const struct in6_addr *)&x->props.saddr, + (const struct in6_addr *)&x->id.daddr)) goto tx_err_link_failure; tdev = dst->dev; @@ -484,7 +486,7 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) ipv6h = ipv6_hdr(skb); if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) || - !ip6_tnl_xmit_ctl(t) || vti6_addr_conflict(t, ipv6h)) + vti6_addr_conflict(t, ipv6h)) goto tx_err; xfrm_decode_session(skb, &fl, AF_INET6); @@ -783,6 +785,7 @@ static int vti6_change_mtu(struct net_device *dev, int new_mtu) } static const struct net_device_ops vti6_netdev_ops = { + .ndo_init = vti6_dev_init, .ndo_uninit = vti6_dev_uninit, .ndo_start_xmit = vti6_tnl_xmit, .ndo_do_ioctl = vti6_ioctl, @@ -852,16 +855,10 @@ static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev) struct ip6_tnl *t = netdev_priv(dev); struct net *net = dev_net(dev); struct vti6_net *ip6n = net_generic(net, vti6_net_id); - int err = vti6_dev_init_gen(dev); - - if (err) - return err; t->parms.proto = IPPROTO_IPV6; dev_hold(dev); - vti6_link_config(t); - rcu_assign_pointer(ip6n->tnls_wc[0], t); return 0; } diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 0171f08325c3..467f310dbbb3 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -2090,7 +2090,7 @@ 
static void ip6_mr_forward(struct net *net, struct mr6_table *mrt, if (ipv6_addr_any(&cache->mf6c_origin) && true_vifi >= 0) { struct mfc6_cache *cache_proxy; - /* For an (*,G) entry, we only check that the incomming + /* For an (*,G) entry, we only check that the incoming * interface is part of the static tree. */ cache_proxy = ip6mr_cache_find_any_parent(mrt, vif); diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 9648de2b6745..5ce107c8aab3 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1550,7 +1550,7 @@ static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb, hdr->daddr = *daddr; } -static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size) +static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu) { struct net_device *dev = idev->dev; struct net *net = dev_net(dev); @@ -1561,13 +1561,13 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size) const struct in6_addr *saddr; int hlen = LL_RESERVED_SPACE(dev); int tlen = dev->needed_tailroom; + unsigned int size = mtu + hlen + tlen; int err; u8 ra[8] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT, 2, 0, 0, IPV6_TLV_PADN, 0 }; /* we assume size > sizeof(ra) here */ - size += hlen + tlen; /* limit our allocations to order-0 page */ size = min_t(int, size, SKB_MAX_ORDER(0, 0)); skb = sock_alloc_send_skb(sk, size, 1, &err); @@ -1576,6 +1576,8 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size) return NULL; skb->priority = TC_PRIO_CONTROL; + skb->reserved_tailroom = skb_end_offset(skb) - + min(mtu, skb_end_offset(skb)); skb_reserve(skb, hlen); if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) { @@ -1690,8 +1692,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, return skb; } -#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? (skb)->dev->mtu - (skb)->len : \ - skb_tailroom(skb)) : 0) +#define AVAILABLE(skb) ((skb) ? 
skb_availroom(skb) : 0) static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted, int crsend) @@ -2823,11 +2824,7 @@ static int igmp6_mcf_seq_show(struct seq_file *seq, void *v) struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq); if (v == SEQ_START_TOKEN) { - seq_printf(seq, - "%3s %6s " - "%32s %32s %6s %6s\n", "Idx", - "Device", "Multicast Address", - "Source Address", "INC", "EXC"); + seq_puts(seq, "Idx Device Multicast Address Source Address INC EXC\n"); } else { seq_printf(seq, "%3d %6.6s %pi6 %pi6 %6lu %6lu\n", diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c index f61429d391d3..b9779d441b12 100644 --- a/net/ipv6/mip6.c +++ b/net/ipv6/mip6.c @@ -97,16 +97,17 @@ static int mip6_mh_filter(struct sock *sk, struct sk_buff *skb) return -1; if (mh->ip6mh_hdrlen < mip6_mh_len(mh->ip6mh_type)) { - LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH message too short: %d vs >=%d\n", - mh->ip6mh_hdrlen, mip6_mh_len(mh->ip6mh_type)); + net_dbg_ratelimited("mip6: MH message too short: %d vs >=%d\n", + mh->ip6mh_hdrlen, + mip6_mh_len(mh->ip6mh_type)); mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_hdrlen) + skb_network_header_len(skb)); return -1; } if (mh->ip6mh_proto != IPPROTO_NONE) { - LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n", - mh->ip6mh_proto); + net_dbg_ratelimited("mip6: MH invalid payload proto = %d\n", + mh->ip6mh_proto); mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_proto) + skb_network_header_len(skb)); return -1; @@ -288,7 +289,7 @@ static int mip6_destopt_offset(struct xfrm_state *x, struct sk_buff *skb, * XXX: packet if HAO exists. */ if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) { - LIMIT_NETDEBUG(KERN_WARNING "mip6: hao exists already, override\n"); + net_dbg_ratelimited("mip6: hao exists already, override\n"); return offset; } diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 4cb45c1079a2..2c9f6bf57325 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1763,7 +1763,7 @@ int __init ndisc_init(void) /* * Initialize the neighbour table */ - neigh_table_init(&nd_tbl); + neigh_table_init(NEIGH_ND_TABLE, &nd_tbl); #ifdef CONFIG_SYSCTL err = neigh_sysctl_register(NULL, &nd_tbl.parms, @@ -1796,6 +1796,6 @@ void ndisc_cleanup(void) #ifdef CONFIG_SYSCTL neigh_sysctl_unregister(&nd_tbl.parms); #endif - neigh_table_clear(&nd_tbl); + neigh_table_clear(NEIGH_ND_TABLE, &nd_tbl); unregister_pernet_subsys(&ndisc_net_ops); } diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index d38e6a8d8b9f..398377a9d018 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c @@ -36,7 +36,7 @@ int ip6_route_me_harder(struct sk_buff *skb) err = dst->error; if (err) { IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES); - LIMIT_NETDEBUG(KERN_DEBUG "ip6_route_me_harder: No more route.\n"); + net_dbg_ratelimited("ip6_route_me_harder: No more route\n"); dst_release(dst); return err; } diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c index 5f5f0438d74d..015eb8a80766 100644 --- a/net/ipv6/netfilter/nf_reject_ipv6.c +++ b/net/ipv6/netfilter/nf_reject_ipv6.c @@ -5,121 +5,109 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ + +#include <linux/module.h> #include <net/ipv6.h> #include <net/ip6_route.h> #include <net/ip6_fib.h> #include <net/ip6_checksum.h> #include <linux/netfilter_ipv6.h> +#include <net/netfilter/ipv6/nf_reject.h> -void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) +const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb, + struct tcphdr *otcph, + unsigned int *otcplen, int hook) { - struct sk_buff *nskb; - struct tcphdr otcph, *tcph; - unsigned int otcplen, hh_len; - int tcphoff, needs_ack; const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); - struct ipv6hdr *ip6h; -#define DEFAULT_TOS_VALUE 0x0U - const __u8 tclass = DEFAULT_TOS_VALUE; - struct dst_entry *dst = NULL; u8 proto; __be16 frag_off; - struct flowi6 fl6; - - if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) || - (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) { - pr_debug("addr is not unicast.\n"); - return; - } + int tcphoff; proto = oip6h->nexthdr; - tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off); + tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), + &proto, &frag_off); if ((tcphoff < 0) || (tcphoff > oldskb->len)) { pr_debug("Cannot get TCP header.\n"); - return; + return NULL; } - otcplen = oldskb->len - tcphoff; + *otcplen = oldskb->len - tcphoff; /* IP header checks: fragment, too short. */ - if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) { - pr_debug("proto(%d) != IPPROTO_TCP, " - "or too short. otcplen = %d\n", - proto, otcplen); - return; + if (proto != IPPROTO_TCP || *otcplen < sizeof(struct tcphdr)) { + pr_debug("proto(%d) != IPPROTO_TCP or too short (len = %d)\n", + proto, *otcplen); + return NULL; } - if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr))) - BUG(); + otcph = skb_header_pointer(oldskb, tcphoff, sizeof(struct tcphdr), + otcph); + if (otcph == NULL) + return NULL; /* No RST for RST. */ - if (otcph.rst) { + if (otcph->rst) { pr_debug("RST is set\n"); - return; + return NULL; } /* Check checksum. 
*/ if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) { pr_debug("TCP checksum is invalid\n"); - return; + return NULL; } - memset(&fl6, 0, sizeof(fl6)); - fl6.flowi6_proto = IPPROTO_TCP; - fl6.saddr = oip6h->daddr; - fl6.daddr = oip6h->saddr; - fl6.fl6_sport = otcph.dest; - fl6.fl6_dport = otcph.source; - security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6)); - dst = ip6_route_output(net, NULL, &fl6); - if (dst == NULL || dst->error) { - dst_release(dst); - return; - } - dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0); - if (IS_ERR(dst)) - return; - - hh_len = (dst->dev->hard_header_len + 15)&~15; - nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr) - + sizeof(struct tcphdr) + dst->trailer_len, - GFP_ATOMIC); - - if (!nskb) { - net_dbg_ratelimited("cannot alloc skb\n"); - dst_release(dst); - return; - } - - skb_dst_set(nskb, dst); + return otcph; +} +EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_get); - skb_reserve(nskb, hh_len + dst->header_len); +struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb, + const struct sk_buff *oldskb, + __be16 protocol, int hoplimit) +{ + struct ipv6hdr *ip6h; + const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); +#define DEFAULT_TOS_VALUE 0x0U + const __u8 tclass = DEFAULT_TOS_VALUE; skb_put(nskb, sizeof(struct ipv6hdr)); skb_reset_network_header(nskb); ip6h = ipv6_hdr(nskb); ip6_flow_hdr(ip6h, tclass, 0); - ip6h->hop_limit = ip6_dst_hoplimit(dst); - ip6h->nexthdr = IPPROTO_TCP; + ip6h->hop_limit = hoplimit; + ip6h->nexthdr = protocol; ip6h->saddr = oip6h->daddr; ip6h->daddr = oip6h->saddr; + nskb->protocol = htons(ETH_P_IPV6); + + return ip6h; +} +EXPORT_SYMBOL_GPL(nf_reject_ip6hdr_put); + +void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb, + const struct sk_buff *oldskb, + const struct tcphdr *oth, unsigned int otcplen) +{ + struct tcphdr *tcph; + int needs_ack; + skb_reset_transport_header(nskb); tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); /* Truncate to length (no data) */ tcph->doff = sizeof(struct tcphdr)/4; - tcph->source = otcph.dest; - tcph->dest = otcph.source; + tcph->source = oth->dest; + tcph->dest = oth->source; - if (otcph.ack) { + if (oth->ack) { needs_ack = 0; - tcph->seq = otcph.ack_seq; + tcph->seq = oth->ack_seq; tcph->ack_seq = 0; } else { needs_ack = 1; - tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin - + otcplen - (otcph.doff<<2)); + tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin + + otcplen - (oth->doff<<2)); tcph->seq = 0; } @@ -137,6 +125,63 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) sizeof(struct tcphdr), IPPROTO_TCP, csum_partial(tcph, sizeof(struct tcphdr), 0)); +} +EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_put); + +void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) +{ + struct sk_buff *nskb; + struct tcphdr _otcph; + const struct tcphdr *otcph; + unsigned int otcplen, hh_len; + const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); + struct ipv6hdr *ip6h; + struct dst_entry *dst = NULL; + struct flowi6 fl6; + + if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) || + (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) { + pr_debug("addr is not unicast.\n"); + return; + } + + otcph = nf_reject_ip6_tcphdr_get(oldskb, &_otcph, &otcplen, hook); + if (!otcph) + return; + + memset(&fl6, 0, sizeof(fl6)); + fl6.flowi6_proto = IPPROTO_TCP; + fl6.saddr = oip6h->daddr; + fl6.daddr = oip6h->saddr; + fl6.fl6_sport = otcph->dest; + fl6.fl6_dport = otcph->source; + security_skb_classify_flow(oldskb, 
flowi6_to_flowi(&fl6)); + dst = ip6_route_output(net, NULL, &fl6); + if (dst == NULL || dst->error) { + dst_release(dst); + return; + } + dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0); + if (IS_ERR(dst)) + return; + + hh_len = (dst->dev->hard_header_len + 15)&~15; + nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr) + + sizeof(struct tcphdr) + dst->trailer_len, + GFP_ATOMIC); + + if (!nskb) { + net_dbg_ratelimited("cannot alloc skb\n"); + dst_release(dst); + return; + } + + skb_dst_set(nskb, dst); + + skb_reserve(nskb, hh_len + dst->header_len); + ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP, + ip6_dst_hoplimit(dst)); + nf_reject_ip6_tcphdr_put(nskb, oldskb, otcph, otcplen); nf_ct_attach(nskb, oldskb); @@ -161,3 +206,5 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) ip6_local_out(nskb); } EXPORT_SYMBOL_GPL(nf_send_reset6); + +MODULE_LICENSE("GPL"); diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c index 556262f40761..8a7ac685076d 100644 --- a/net/ipv6/netfilter/nft_masq_ipv6.c +++ b/net/ipv6/netfilter/nft_masq_ipv6.c @@ -39,6 +39,7 @@ static const struct nft_expr_ops nft_masq_ipv6_ops = { .eval = nft_masq_ipv6_eval, .init = nft_masq_init, .dump = nft_masq_dump, + .validate = nft_masq_validate, }; static struct nft_expr_type nft_masq_ipv6_type __read_mostly = { diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c index fc24c390af05..97f41a3e68d9 100644 --- a/net/ipv6/output_core.c +++ b/net/ipv6/output_core.c @@ -3,11 +3,45 @@ * not configured or static. These functions are needed by GSO/GRO implementation. */ #include <linux/export.h> +#include <net/ip.h> #include <net/ipv6.h> #include <net/ip6_fib.h> #include <net/addrconf.h> #include <net/secure_seq.h> +/* This function exists only for tap drivers that must support broken + * clients requesting UFO without specifying an IPv6 fragment ID. + * + * This is similar to ipv6_select_ident() but we use an independent hash + * seed to limit information leakage. + * + * The network header must be set before calling this. 
+ */ +void ipv6_proxy_select_ident(struct sk_buff *skb) +{ + static u32 ip6_proxy_idents_hashrnd __read_mostly; + struct in6_addr buf[2]; + struct in6_addr *addrs; + u32 hash, id; + + addrs = skb_header_pointer(skb, + skb_network_offset(skb) + + offsetof(struct ipv6hdr, saddr), + sizeof(buf), buf); + if (!addrs) + return; + + net_get_random_once(&ip6_proxy_idents_hashrnd, + sizeof(ip6_proxy_idents_hashrnd)); + + hash = __ipv6_addr_jhash(&addrs[1], ip6_proxy_idents_hashrnd); + hash = __ipv6_addr_jhash(&addrs[0], hash); + + id = ip_idents_reserve(hash, 1); + skb_shinfo(skb)->ip6_frag_id = htonl(id); +} +EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident); + int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) { u16 offset = sizeof(struct ipv6hdr); diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index 1752cd0b4882..679253d0af84 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c @@ -136,6 +136,7 @@ static const struct snmp_mib snmp6_udp6_list[] = { SNMP_MIB_ITEM("Udp6RcvbufErrors", UDP_MIB_RCVBUFERRORS), SNMP_MIB_ITEM("Udp6SndbufErrors", UDP_MIB_SNDBUFERRORS), SNMP_MIB_ITEM("Udp6InCsumErrors", UDP_MIB_CSUMERRORS), + SNMP_MIB_ITEM("Udp6IgnoredMulti", UDP_MIB_IGNOREDMULTI), SNMP_MIB_SENTINEL }; diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 896af8807979..0cbcf98f2cab 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -486,11 +486,11 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk, } if (skb_csum_unnecessary(skb)) { - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); } else if (msg->msg_flags&MSG_TRUNC) { if (__skb_checksum_complete(skb)) goto csum_copy_err; - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); } else { err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov); if (err == -EINVAL) @@ -548,7 +548,8 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, if (!rp->checksum) goto send; - if ((skb = skb_peek(&sk->sk_write_queue)) == NULL) + skb = skb_peek(&sk->sk_write_queue); + if (!skb) goto out; offset = rp->offset; diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 1a157ca2ebc1..51ab096ae574 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -69,7 +69,7 @@ struct ip6frag_skb_cb { #define FRAG6_CB(skb) ((struct ip6frag_skb_cb *)((skb)->cb)) -static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h) +static u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h) { return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK); } @@ -178,7 +178,7 @@ static void ip6_frag_expire(unsigned long data) ip6_expire_frag_queue(net, fq, &ip6_frags); } -static __inline__ struct frag_queue * +static struct frag_queue * fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6_addr *dst, u8 ecn) { @@ -684,21 +684,21 @@ static void ip6_frags_sysctl_unregister(void) unregister_net_sysctl_table(ip6_ctl_header); } #else -static inline int ip6_frags_ns_sysctl_register(struct net *net) +static int ip6_frags_ns_sysctl_register(struct net *net) { return 0; } -static inline void ip6_frags_ns_sysctl_unregister(struct net *net) +static void ip6_frags_ns_sysctl_unregister(struct net *net) { } -static inline int ip6_frags_sysctl_register(void) +static int ip6_frags_sysctl_register(void) { return 0; } -static inline void ip6_frags_sysctl_unregister(void) +static void ip6_frags_sysctl_unregister(void) { } #endif diff --git a/net/ipv6/route.c b/net/ipv6/route.c index a318dd89b6d9..c91083156edb 100644 --- 
a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -772,23 +772,22 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, } #endif -#define BACKTRACK(__net, saddr) \ -do { \ - if (rt == __net->ipv6.ip6_null_entry) { \ - struct fib6_node *pn; \ - while (1) { \ - if (fn->fn_flags & RTN_TL_ROOT) \ - goto out; \ - pn = fn->parent; \ - if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) \ - fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); \ - else \ - fn = pn; \ - if (fn->fn_flags & RTN_RTINFO) \ - goto restart; \ - } \ - } \ -} while (0) +static struct fib6_node* fib6_backtrack(struct fib6_node *fn, + struct in6_addr *saddr) +{ + struct fib6_node *pn; + while (1) { + if (fn->fn_flags & RTN_TL_ROOT) + return NULL; + pn = fn->parent; + if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn) + fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr); + else + fn = pn; + if (fn->fn_flags & RTN_RTINFO) + return fn; + } +} static struct rt6_info *ip6_pol_route_lookup(struct net *net, struct fib6_table *table, @@ -804,8 +803,11 @@ restart: rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags); if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0) rt = rt6_multipath_select(rt, fl6, fl6->flowi6_oif, flags); - BACKTRACK(net, &fl6->saddr); -out: + if (rt == net->ipv6.ip6_null_entry) { + fn = fib6_backtrack(fn, &fl6->saddr); + if (fn) + goto restart; + } dst_use(&rt->dst, jiffies); read_unlock_bh(&table->tb6_lock); return rt; @@ -915,33 +917,48 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif, struct flowi6 *fl6, int flags) { - struct fib6_node *fn; + struct fib6_node *fn, *saved_fn; struct rt6_info *rt, *nrt; int strict = 0; int attempts = 3; int err; - int reachable = net->ipv6.devconf_all->forwarding ? 0 : RT6_LOOKUP_F_REACHABLE; strict |= flags & RT6_LOOKUP_F_IFACE; + if (net->ipv6.devconf_all->forwarding == 0) + strict |= RT6_LOOKUP_F_REACHABLE; -relookup: +redo_fib6_lookup_lock: read_lock_bh(&table->tb6_lock); -restart_2: fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); + saved_fn = fn; -restart: - rt = rt6_select(fn, oif, strict | reachable); +redo_rt6_select: + rt = rt6_select(fn, oif, strict); if (rt->rt6i_nsiblings) - rt = rt6_multipath_select(rt, fl6, oif, strict | reachable); - BACKTRACK(net, &fl6->saddr); - if (rt == net->ipv6.ip6_null_entry || - rt->rt6i_flags & RTF_CACHE) - goto out; + rt = rt6_multipath_select(rt, fl6, oif, strict); + if (rt == net->ipv6.ip6_null_entry) { + fn = fib6_backtrack(fn, &fl6->saddr); + if (fn) + goto redo_rt6_select; + else if (strict & RT6_LOOKUP_F_REACHABLE) { + /* also consider unreachable route */ + strict &= ~RT6_LOOKUP_F_REACHABLE; + fn = saved_fn; + goto redo_rt6_select; + } else { + dst_hold(&rt->dst); + read_unlock_bh(&table->tb6_lock); + goto out2; + } + } dst_hold(&rt->dst); read_unlock_bh(&table->tb6_lock); + if (rt->rt6i_flags & RTF_CACHE) + goto out2; + if (!(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY))) nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr); else if (!(rt->dst.flags & DST_HOST)) @@ -967,15 +984,8 @@ restart: * released someone could insert this route. Relookup. 
*/ ip6_rt_put(rt); - goto relookup; + goto redo_fib6_lookup_lock; -out: - if (reachable) { - reachable = 0; - goto restart_2; - } - dst_hold(&rt->dst); - read_unlock_bh(&table->tb6_lock); out2: rt->dst.lastuse = jiffies; rt->dst.__use++; @@ -1235,10 +1245,12 @@ restart: rt = net->ipv6.ip6_null_entry; else if (rt->dst.error) { rt = net->ipv6.ip6_null_entry; - goto out; + } else if (rt == net->ipv6.ip6_null_entry) { + fn = fib6_backtrack(fn, &fl6->saddr); + if (fn) + goto restart; } - BACKTRACK(net, &fl6->saddr); -out: + dst_hold(&rt->dst); read_unlock_bh(&table->tb6_lock); diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 58e5b4710127..660496de6125 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -195,10 +195,8 @@ static int ipip6_tunnel_create(struct net_device *dev) struct sit_net *sitn = net_generic(net, sit_net_id); int err; - err = ipip6_tunnel_init(dev); - if (err < 0) - goto out; - ipip6_tunnel_clone_6rd(dev, sitn); + memcpy(dev->dev_addr, &t->parms.iph.saddr, 4); + memcpy(dev->broadcast, &t->parms.iph.daddr, 4); if ((__force u16)t->parms.i_flags & SIT_ISATAP) dev->priv_flags |= IFF_ISATAP; @@ -207,7 +205,8 @@ static int ipip6_tunnel_create(struct net_device *dev) if (err < 0) goto out; - strcpy(t->parms.name, dev->name); + ipip6_tunnel_clone_6rd(dev, sitn); + dev->rtnl_link_ops = &sit_link_ops; dev_hold(dev); @@ -1330,6 +1329,7 @@ static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu) } static const struct net_device_ops ipip6_netdev_ops = { + .ndo_init = ipip6_tunnel_init, .ndo_uninit = ipip6_tunnel_uninit, .ndo_start_xmit = sit_tunnel_xmit, .ndo_do_ioctl = ipip6_tunnel_ioctl, @@ -1378,9 +1378,7 @@ static int ipip6_tunnel_init(struct net_device *dev) tunnel->dev = dev; tunnel->net = dev_net(dev); - - memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4); - memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4); + strcpy(tunnel->parms.name, dev->name); ipip6_tunnel_bind_dev(dev); dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); @@ -1405,7 +1403,6 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev) tunnel->dev = dev; tunnel->net = dev_net(dev); - strcpy(tunnel->parms.name, dev->name); iph->version = 4; iph->protocol = IPPROTO_IPV6; @@ -1714,7 +1711,7 @@ static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_u16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) || nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, - tunnel->encap.dport)) + tunnel->encap.flags)) goto nla_put_failure; return 0; diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 2f25cb6347ca..7337fc7947e2 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -166,13 +166,15 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) int mss; struct dst_entry *dst; __u8 rcv_wscale; - bool ecn_ok = false; if (!sysctl_tcp_syncookies || !th->ack || th->rst) goto out; - if (tcp_synq_no_recent_overflow(sk) || - (mss = __cookie_v6_check(ipv6_hdr(skb), th, cookie)) == 0) { + if (tcp_synq_no_recent_overflow(sk)) + goto out; + + mss = __cookie_v6_check(ipv6_hdr(skb), th, cookie); + if (mss == 0) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED); goto out; } @@ -183,7 +185,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) memset(&tcp_opt, 0, sizeof(tcp_opt)); tcp_parse_options(skb, &tcp_opt, 0, NULL); - if (!cookie_check_timestamp(&tcp_opt, sock_net(sk), &ecn_ok)) + if (!cookie_timestamp_decode(&tcp_opt)) goto out; ret = NULL; @@ -220,7 +222,6 @@ struct sock *cookie_v6_check(struct sock *sk, 
struct sk_buff *skb) req->expires = 0UL; req->num_retrans = 0; - ireq->ecn_ok = ecn_ok; ireq->snd_wscale = tcp_opt.snd_wscale; ireq->sack_ok = tcp_opt.sack_ok; ireq->wscale_ok = tcp_opt.wscale_ok; @@ -261,6 +262,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) dst_metric(dst, RTAX_INITRWND)); ireq->rcv_wscale = rcv_wscale; + ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), dst); ret = get_cookie_sock(sk, skb, req, dst); out: @@ -269,4 +271,3 @@ out_free: reqsk_free(req); return NULL; } - diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 831495529b82..1985b4933a6b 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -200,8 +200,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, sk->sk_v6_daddr = usin->sin6_addr; np->flow_label = fl6.flowlabel; - ip6_set_txhash(sk); - /* * TCP over IPv4 */ @@ -297,6 +295,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr, if (err) goto late_failure; + ip6_set_txhash(sk); + if (!tp->write_seq && likely(!tp->repair)) tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32, sk->sk_v6_daddr.s6_addr32, @@ -1293,6 +1293,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) struct dst_entry *dst = sk->sk_rx_dst; sock_rps_save_rxhash(sk, skb); + sk_mark_napi_id(sk, skb); if (dst) { if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || dst->ops->check(dst, np->rx_dst_cookie) == NULL) { @@ -1322,6 +1323,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) */ if (nsk != sk) { sock_rps_save_rxhash(nsk, skb); + sk_mark_napi_id(sk, skb); if (tcp_child_process(sk, nsk, skb)) goto reset; if (opt_skb) @@ -1454,7 +1456,7 @@ process: if (sk_filter(sk, skb)) goto discard_and_relse; - sk_mark_napi_id(sk, skb); + sk_incoming_cpu_update(sk); skb->dev = NULL; bh_lock_sock_nested(sk); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index f6ba535b6feb..0ba3de4f2368 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -424,8 +424,8 @@ try_again: } if (skb_csum_unnecessary(skb)) - err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), - msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, sizeof(struct udphdr), + msg, copied); else { err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); if (err == -EINVAL) @@ -577,6 +577,7 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) if (!ipv6_addr_any(&sk->sk_v6_daddr)) { sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); + sk_incoming_cpu_update(sk); } rc = sock_queue_rcv_skb(sk, skb); @@ -659,15 +660,13 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) { if (up->pcrlen == 0) { /* full coverage was set */ - LIMIT_NETDEBUG(KERN_WARNING "UDPLITE6: partial coverage" - " %d while full coverage %d requested\n", - UDP_SKB_CB(skb)->cscov, skb->len); + net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n", + UDP_SKB_CB(skb)->cscov, skb->len); goto drop; } if (UDP_SKB_CB(skb)->cscov < up->pcrlen) { - LIMIT_NETDEBUG(KERN_WARNING "UDPLITE6: coverage %d " - "too small, need min %d\n", - UDP_SKB_CB(skb)->cscov, up->pcrlen); + net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n", + UDP_SKB_CB(skb)->cscov, up->pcrlen); goto drop; } } @@ -760,9 +759,9 @@ static void udp6_csum_zero_error(struct sk_buff *skb) /* RFC 2460 section 8.1 says that we SHOULD log * this error. Well, it is reasonable. 
*/ - LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n", - &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source), - &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest)); + net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n", + &ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source), + &ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest)); } /* @@ -771,7 +770,7 @@ static void udp6_csum_zero_error(struct sk_buff *skb) */ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, const struct in6_addr *saddr, const struct in6_addr *daddr, - struct udp_table *udptable) + struct udp_table *udptable, int proto) { struct sock *sk, *stack[256 / sizeof(struct sock *)]; const struct udphdr *uh = udp_hdr(skb); @@ -781,6 +780,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, int dif = inet6_iif(skb); unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node); unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10); + bool inner_flushed = false; if (use_hash2) { hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) & @@ -803,6 +803,7 @@ start_lookup: (uh->check || udp_sk(sk)->no_check6_rx)) { if (unlikely(count == ARRAY_SIZE(stack))) { flush_stack(stack, count, skb, ~0); + inner_flushed = true; count = 0; } stack[count++] = sk; @@ -821,7 +822,10 @@ start_lookup: if (count) { flush_stack(stack, count, skb, count - 1); } else { - kfree_skb(skb); + if (!inner_flushed) + UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI, + proto == IPPROTO_UDPLITE); + consume_skb(skb); } return 0; } @@ -873,7 +877,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, */ if (ipv6_addr_is_multicast(daddr)) return __udp6_lib_mcast_deliver(net, skb, - saddr, daddr, udptable); + saddr, daddr, udptable, proto); /* Unicast */ @@ -925,14 +929,11 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, return 0; short_packet: - LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n", - proto == IPPROTO_UDPLITE ? "-Lite" : "", - saddr, - ntohs(uh->source), - ulen, - skb->len, - daddr, - ntohs(uh->dest)); + net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n", + proto == IPPROTO_UDPLITE ? "-Lite" : "", + saddr, ntohs(uh->source), + ulen, skb->len, + daddr, ntohs(uh->dest)); goto discard; csum_error: UDP6_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); @@ -1284,7 +1285,7 @@ back_from_confirm: /* ... which is an evident application bug. 
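
In the multicast delivery hunk just above, an undelivered datagram no longer disappears without trace: when no socket matched and no earlier stack flush consumed the packet, UDP_MIB_IGNOREDMULTI is incremented, and the skb is released with consume_skb() instead of kfree_skb() so drop-monitoring tools do not count it as an error. The tail of __udp6_lib_mcast_deliver() as it reads in the hunk:

	if (count) {
		flush_stack(stack, count, skb, count - 1);
	} else {
		/* nothing matched and nothing was flushed earlier:
		 * account the datagram as ignored multicast */
		if (!inner_flushed)
			UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
					 proto == IPPROTO_UDPLITE);
		consume_skb(skb);
	}
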
--ANK */ release_sock(sk); - LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n"); + net_dbg_ratelimited("udp cork app bug 2\n"); err = -EINVAL; goto out; } diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index 6b8f543f6ac6..b6aa8ed18257 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -42,11 +42,11 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, SKB_GSO_DODGY | SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM | + SKB_GSO_TUNNEL_REMCSUM | SKB_GSO_GRE | SKB_GSO_GRE_CSUM | SKB_GSO_IPIP | - SKB_GSO_SIT | - SKB_GSO_MPLS) || + SKB_GSO_SIT) || !(type & (SKB_GSO_UDP)))) goto out; diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index ac49f84fe2c3..5f983644373a 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -170,8 +170,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) case IPPROTO_DCCP: if (!onlyproto && (nh + offset + 4 < skb->data || pskb_may_pull(skb, nh + offset + 4 - skb->data))) { - __be16 *ports = (__be16 *)exthdr; + __be16 *ports; + nh = skb_network_header(skb); + ports = (__be16 *)(nh + offset); fl6->fl6_sport = ports[!!reverse]; fl6->fl6_dport = ports[!reverse]; } @@ -180,8 +182,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) case IPPROTO_ICMPV6: if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) { - u8 *icmp = (u8 *)exthdr; + u8 *icmp; + nh = skb_network_header(skb); + icmp = (u8 *)(nh + offset); fl6->fl6_icmp_type = icmp[0]; fl6->fl6_icmp_code = icmp[1]; } @@ -192,8 +196,9 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) case IPPROTO_MH: if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) { struct ip6_mh *mh; - mh = (struct ip6_mh *)exthdr; + nh = skb_network_header(skb); + mh = (struct ip6_mh *)(nh + offset); fl6->fl6_mh_type = mh->ip6mh_type; } fl6->flowi6_proto = nexthdr; diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index 91729b807c7d..a0c75366c93b 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c @@ -306,7 +306,7 @@ void ipxitf_down(struct ipx_interface *intrfc) spin_unlock_bh(&ipx_interfaces_lock); } -static __inline__ void __ipxitf_put(struct ipx_interface *intrfc) +static void __ipxitf_put(struct ipx_interface *intrfc) { if (atomic_dec_and_test(&intrfc->refcnt)) __ipxitf_down(intrfc); @@ -1805,8 +1805,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, msg->msg_flags |= MSG_TRUNC; } - rc = skb_copy_datagram_iovec(skb, sizeof(struct ipxhdr), msg->msg_iov, - copied); + rc = skb_copy_datagram_msg(skb, sizeof(struct ipxhdr), msg, copied); if (rc) goto out_free; if (skb->tstamp.tv64) diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c index e15c16a517e7..c1d247ebe916 100644 --- a/net/ipx/ipx_proc.c +++ b/net/ipx/ipx_proc.c @@ -45,7 +45,7 @@ static int ipx_seq_interface_show(struct seq_file *seq, void *v) } i = list_entry(v, struct ipx_interface, node); - seq_printf(seq, "%08lX ", (unsigned long int)ntohl(i->if_netnum)); + seq_printf(seq, "%08X ", ntohl(i->if_netnum)); seq_printf(seq, "%02X%02X%02X%02X%02X%02X ", i->if_node[0], i->if_node[1], i->if_node[2], i->if_node[3], i->if_node[4], i->if_node[5]); @@ -87,10 +87,10 @@ static int ipx_seq_route_show(struct seq_file *seq, void *v) rt = list_entry(v, struct ipx_route, node); - seq_printf(seq, "%08lX ", (unsigned long int)ntohl(rt->ir_net)); + seq_printf(seq, "%08X ", ntohl(rt->ir_net)); if (rt->ir_routed) - seq_printf(seq, "%08lX %02X%02X%02X%02X%02X%02X\n", - (long unsigned int)ntohl(rt->ir_intrfc->if_netnum), + seq_printf(seq, 
"%08X %02X%02X%02X%02X%02X%02X\n", + ntohl(rt->ir_intrfc->if_netnum), rt->ir_router_node[0], rt->ir_router_node[1], rt->ir_router_node[2], rt->ir_router_node[3], rt->ir_router_node[4], rt->ir_router_node[5]); @@ -194,19 +194,19 @@ static int ipx_seq_socket_show(struct seq_file *seq, void *v) s = v; ipxs = ipx_sk(s); #ifdef CONFIG_IPX_INTERN - seq_printf(seq, "%08lX:%02X%02X%02X%02X%02X%02X:%04X ", - (unsigned long)ntohl(ipxs->intrfc->if_netnum), + seq_printf(seq, "%08X:%02X%02X%02X%02X%02X%02X:%04X ", + ntohl(ipxs->intrfc->if_netnum), ipxs->node[0], ipxs->node[1], ipxs->node[2], ipxs->node[3], ipxs->node[4], ipxs->node[5], ntohs(ipxs->port)); #else - seq_printf(seq, "%08lX:%04X ", (unsigned long) ntohl(ipxs->intrfc->if_netnum), + seq_printf(seq, "%08X:%04X ", ntohl(ipxs->intrfc->if_netnum), ntohs(ipxs->port)); #endif /* CONFIG_IPX_INTERN */ if (s->sk_state != TCP_ESTABLISHED) seq_printf(seq, "%-28s", "Not_Connected"); else { - seq_printf(seq, "%08lX:%02X%02X%02X%02X%02X%02X:%04X ", - (unsigned long)ntohl(ipxs->dest_addr.net), + seq_printf(seq, "%08X:%02X%02X%02X%02X%02X%02X:%04X ", + ntohl(ipxs->dest_addr.net), ipxs->dest_addr.node[0], ipxs->dest_addr.node[1], ipxs->dest_addr.node[2], ipxs->dest_addr.node[3], ipxs->dest_addr.node[4], ipxs->dest_addr.node[5], diff --git a/net/ipx/sysctl_net_ipx.c b/net/ipx/sysctl_net_ipx.c index ad7c03dedaab..0dafcc561ed6 100644 --- a/net/ipx/sysctl_net_ipx.c +++ b/net/ipx/sysctl_net_ipx.c @@ -9,14 +9,12 @@ #include <linux/mm.h> #include <linux/sysctl.h> #include <net/net_namespace.h> +#include <net/ipx.h> #ifndef CONFIG_SYSCTL #error This file should not be compiled without CONFIG_SYSCTL defined #endif -/* From af_ipx.c */ -extern int sysctl_ipx_pprop_broadcasting; - static struct ctl_table ipx_table[] = { { .procname = "ipx_pprop_broadcasting", diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 92fafd485deb..e8c409055922 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -84,14 +84,12 @@ static int irda_data_indication(void *instance, void *sap, struct sk_buff *skb) struct sock *sk; int err; - IRDA_DEBUG(3, "%s()\n", __func__); - self = instance; sk = instance; err = sock_queue_rcv_skb(sk, skb); if (err) { - IRDA_DEBUG(1, "%s(), error: no more mem!\n", __func__); + pr_debug("%s(), error: no more mem!\n", __func__); self->rx_flow = FLOW_STOP; /* When we return error, TTP will need to requeue the skb */ @@ -115,7 +113,7 @@ static void irda_disconnect_indication(void *instance, void *sap, self = instance; - IRDA_DEBUG(2, "%s(%p)\n", __func__, self); + pr_debug("%s(%p)\n", __func__, self); /* Don't care about it, but let's not leak it */ if(skb) @@ -123,8 +121,8 @@ static void irda_disconnect_indication(void *instance, void *sap, sk = instance; if (sk == NULL) { - IRDA_DEBUG(0, "%s(%p) : BUG : sk is NULL\n", - __func__, self); + pr_debug("%s(%p) : BUG : sk is NULL\n", + __func__, self); return; } @@ -180,7 +178,7 @@ static void irda_connect_confirm(void *instance, void *sap, self = instance; - IRDA_DEBUG(2, "%s(%p)\n", __func__, self); + pr_debug("%s(%p)\n", __func__, self); sk = instance; if (sk == NULL) { @@ -201,16 +199,16 @@ static void irda_connect_confirm(void *instance, void *sap, switch (sk->sk_type) { case SOCK_STREAM: if (max_sdu_size != 0) { - IRDA_ERROR("%s: max_sdu_size must be 0\n", - __func__); + net_err_ratelimited("%s: max_sdu_size must be 0\n", + __func__); return; } self->max_data_size = irttp_get_max_seg_size(self->tsap); break; case SOCK_SEQPACKET: if (max_sdu_size == 0) { - IRDA_ERROR("%s: max_sdu_size cannot be 0\n", - 
__func__); + net_err_ratelimited("%s: max_sdu_size cannot be 0\n", + __func__); return; } self->max_data_size = max_sdu_size; @@ -219,8 +217,8 @@ static void irda_connect_confirm(void *instance, void *sap, self->max_data_size = irttp_get_max_seg_size(self->tsap); } - IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __func__, - self->max_data_size); + pr_debug("%s(), max_data_size=%d\n", __func__, + self->max_data_size); memcpy(&self->qos_tx, qos, sizeof(struct qos_info)); @@ -244,7 +242,7 @@ static void irda_connect_indication(void *instance, void *sap, self = instance; - IRDA_DEBUG(2, "%s(%p)\n", __func__, self); + pr_debug("%s(%p)\n", __func__, self); sk = instance; if (sk == NULL) { @@ -262,8 +260,8 @@ static void irda_connect_indication(void *instance, void *sap, switch (sk->sk_type) { case SOCK_STREAM: if (max_sdu_size != 0) { - IRDA_ERROR("%s: max_sdu_size must be 0\n", - __func__); + net_err_ratelimited("%s: max_sdu_size must be 0\n", + __func__); kfree_skb(skb); return; } @@ -271,8 +269,8 @@ static void irda_connect_indication(void *instance, void *sap, break; case SOCK_SEQPACKET: if (max_sdu_size == 0) { - IRDA_ERROR("%s: max_sdu_size cannot be 0\n", - __func__); + net_err_ratelimited("%s: max_sdu_size cannot be 0\n", + __func__); kfree_skb(skb); return; } @@ -282,8 +280,8 @@ static void irda_connect_indication(void *instance, void *sap, self->max_data_size = irttp_get_max_seg_size(self->tsap); } - IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __func__, - self->max_data_size); + pr_debug("%s(), max_data_size=%d\n", __func__, + self->max_data_size); memcpy(&self->qos_tx, qos, sizeof(struct qos_info)); @@ -301,12 +299,10 @@ static void irda_connect_response(struct irda_sock *self) { struct sk_buff *skb; - IRDA_DEBUG(2, "%s()\n", __func__); - skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER, GFP_KERNEL); if (skb == NULL) { - IRDA_DEBUG(0, "%s() Unable to allocate sk_buff!\n", - __func__); + pr_debug("%s() Unable to allocate sk_buff!\n", + __func__); return; } @@ -327,26 +323,24 @@ static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow) struct irda_sock *self; struct sock *sk; - IRDA_DEBUG(2, "%s()\n", __func__); - self = instance; sk = instance; BUG_ON(sk == NULL); switch (flow) { case FLOW_STOP: - IRDA_DEBUG(1, "%s(), IrTTP wants us to slow down\n", - __func__); + pr_debug("%s(), IrTTP wants us to slow down\n", + __func__); self->tx_flow = flow; break; case FLOW_START: self->tx_flow = flow; - IRDA_DEBUG(1, "%s(), IrTTP wants us to start again\n", - __func__); + pr_debug("%s(), IrTTP wants us to start again\n", + __func__); wake_up_interruptible(sk_sleep(sk)); break; default: - IRDA_DEBUG(0, "%s(), Unknown flow command!\n", __func__); + pr_debug("%s(), Unknown flow command!\n", __func__); /* Unknown flow command, better stop */ self->tx_flow = flow; break; @@ -368,11 +362,11 @@ static void irda_getvalue_confirm(int result, __u16 obj_id, self = priv; if (!self) { - IRDA_WARNING("%s: lost myself!\n", __func__); + net_warn_ratelimited("%s: lost myself!\n", __func__); return; } - IRDA_DEBUG(2, "%s(%p)\n", __func__, self); + pr_debug("%s(%p)\n", __func__, self); /* We probably don't need to make any more queries */ iriap_close(self->iriap); @@ -380,8 +374,8 @@ static void irda_getvalue_confirm(int result, __u16 obj_id, /* Check if request succeeded */ if (result != IAS_SUCCESS) { - IRDA_DEBUG(1, "%s(), IAS query failed! (%d)\n", __func__, - result); + pr_debug("%s(), IAS query failed! 
(%d)\n", __func__, + result); self->errno = result; /* We really need it later */ @@ -413,11 +407,9 @@ static void irda_selective_discovery_indication(discinfo_t *discovery, { struct irda_sock *self; - IRDA_DEBUG(2, "%s()\n", __func__); - self = priv; if (!self) { - IRDA_WARNING("%s: lost myself!\n", __func__); + net_warn_ratelimited("%s: lost myself!\n", __func__); return; } @@ -440,8 +432,6 @@ static void irda_discovery_timeout(u_long priv) { struct irda_sock *self; - IRDA_DEBUG(2, "%s()\n", __func__); - self = (struct irda_sock *) priv; BUG_ON(self == NULL); @@ -465,7 +455,7 @@ static int irda_open_tsap(struct irda_sock *self, __u8 tsap_sel, char *name) notify_t notify; if (self->tsap) { - IRDA_DEBUG(0, "%s: busy!\n", __func__); + pr_debug("%s: busy!\n", __func__); return -EBUSY; } @@ -483,8 +473,8 @@ static int irda_open_tsap(struct irda_sock *self, __u8 tsap_sel, char *name) self->tsap = irttp_open_tsap(tsap_sel, DEFAULT_INITIAL_CREDIT, ¬ify); if (self->tsap == NULL) { - IRDA_DEBUG(0, "%s(), Unable to allocate TSAP!\n", - __func__); + pr_debug("%s(), Unable to allocate TSAP!\n", + __func__); return -ENOMEM; } /* Remember which TSAP selector we actually got */ @@ -505,7 +495,7 @@ static int irda_open_lsap(struct irda_sock *self, int pid) notify_t notify; if (self->lsap) { - IRDA_WARNING("%s(), busy!\n", __func__); + net_warn_ratelimited("%s(), busy!\n", __func__); return -EBUSY; } @@ -517,7 +507,7 @@ static int irda_open_lsap(struct irda_sock *self, int pid) self->lsap = irlmp_open_lsap(LSAP_CONNLESS, ¬ify, pid); if (self->lsap == NULL) { - IRDA_DEBUG( 0, "%s(), Unable to allocate LSAP!\n", __func__); + pr_debug("%s(), Unable to allocate LSAP!\n", __func__); return -ENOMEM; } @@ -538,11 +528,11 @@ static int irda_open_lsap(struct irda_sock *self, int pid) */ static int irda_find_lsap_sel(struct irda_sock *self, char *name) { - IRDA_DEBUG(2, "%s(%p, %s)\n", __func__, self, name); + pr_debug("%s(%p, %s)\n", __func__, self, name); if (self->iriap) { - IRDA_WARNING("%s(): busy with a previous query\n", - __func__); + net_warn_ratelimited("%s(): busy with a previous query\n", + __func__); return -EBUSY; } @@ -577,8 +567,8 @@ static int irda_find_lsap_sel(struct irda_sock *self, char *name) /* Get the remote TSAP selector */ switch (self->ias_result->type) { case IAS_INTEGER: - IRDA_DEBUG(4, "%s() int=%d\n", - __func__, self->ias_result->t.integer); + pr_debug("%s() int=%d\n", + __func__, self->ias_result->t.integer); if (self->ias_result->t.integer != -1) self->dtsap_sel = self->ias_result->t.integer; @@ -587,7 +577,7 @@ static int irda_find_lsap_sel(struct irda_sock *self, char *name) break; default: self->dtsap_sel = 0; - IRDA_DEBUG(0, "%s(), bad type!\n", __func__); + pr_debug("%s(), bad type!\n", __func__); break; } if (self->ias_result) @@ -625,7 +615,7 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name) __u32 daddr = DEV_ADDR_ANY; /* Address we found the service on */ __u8 dtsap_sel = 0x0; /* TSAP associated with it */ - IRDA_DEBUG(2, "%s(), name=%s\n", __func__, name); + pr_debug("%s(), name=%s\n", __func__, name); /* Ask lmp for the current discovery log * Note : we have to use irlmp_get_discoveries(), as opposed @@ -646,8 +636,8 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name) /* Try the address in the log */ self->daddr = discoveries[i].daddr; self->saddr = 0x0; - IRDA_DEBUG(1, "%s(), trying daddr = %08x\n", - __func__, self->daddr); + pr_debug("%s(), trying daddr = %08x\n", + __func__, self->daddr); /* Query remote 
LM-IAS for this service */ err = irda_find_lsap_sel(self, name); @@ -655,8 +645,8 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name) case 0: /* We found the requested service */ if(daddr != DEV_ADDR_ANY) { - IRDA_DEBUG(1, "%s(), discovered service ''%s'' in two different devices !!!\n", - __func__, name); + pr_debug("%s(), discovered service ''%s'' in two different devices !!!\n", + __func__, name); self->daddr = DEV_ADDR_ANY; kfree(discoveries); return -ENOTUNIQ; @@ -670,7 +660,8 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name) break; default: /* Something bad did happen :-( */ - IRDA_DEBUG(0, "%s(), unexpected IAS query failure\n", __func__); + pr_debug("%s(), unexpected IAS query failure\n", + __func__); self->daddr = DEV_ADDR_ANY; kfree(discoveries); return -EHOSTUNREACH; @@ -681,8 +672,8 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name) /* Check out what we found */ if(daddr == DEV_ADDR_ANY) { - IRDA_DEBUG(1, "%s(), cannot discover service ''%s'' in any device !!!\n", - __func__, name); + pr_debug("%s(), cannot discover service ''%s'' in any device !!!\n", + __func__, name); self->daddr = DEV_ADDR_ANY; return -EADDRNOTAVAIL; } @@ -692,8 +683,8 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name) self->saddr = 0x0; self->dtsap_sel = dtsap_sel; - IRDA_DEBUG(1, "%s(), discovered requested service ''%s'' at address %08x\n", - __func__, name, self->daddr); + pr_debug("%s(), discovered requested service ''%s'' at address %08x\n", + __func__, name, self->daddr); return 0; } @@ -725,8 +716,8 @@ static int irda_getname(struct socket *sock, struct sockaddr *uaddr, saddr.sir_addr = self->saddr; } - IRDA_DEBUG(1, "%s(), tsap_sel = %#x\n", __func__, saddr.sir_lsap_sel); - IRDA_DEBUG(1, "%s(), addr = %08x\n", __func__, saddr.sir_addr); + pr_debug("%s(), tsap_sel = %#x\n", __func__, saddr.sir_lsap_sel); + pr_debug("%s(), addr = %08x\n", __func__, saddr.sir_addr); /* uaddr_len come to us uninitialised */ *uaddr_len = sizeof (struct sockaddr_irda); @@ -746,8 +737,6 @@ static int irda_listen(struct socket *sock, int backlog) struct sock *sk = sock->sk; int err = -EOPNOTSUPP; - IRDA_DEBUG(2, "%s()\n", __func__); - lock_sock(sk); if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) && @@ -779,7 +768,7 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) struct irda_sock *self = irda_sk(sk); int err; - IRDA_DEBUG(2, "%s(%p)\n", __func__, self); + pr_debug("%s(%p)\n", __func__, self); if (addr_len != sizeof(struct sockaddr_irda)) return -EINVAL; @@ -792,7 +781,8 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) self->pid = addr->sir_lsap_sel; err = -EOPNOTSUPP; if (self->pid & 0x80) { - IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__); + pr_debug("%s(), extension in PID not supp!\n", + __func__); goto out; } err = irda_open_lsap(self, self->pid); @@ -845,8 +835,6 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags) struct sk_buff *skb; int err; - IRDA_DEBUG(2, "%s()\n", __func__); - err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0); if (err) return err; @@ -911,7 +899,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags) new->tsap = irttp_dup(self->tsap, new); err = -EPERM; /* value does not seem to make sense. 
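
Most of the IrDA churn in this series is mechanical: every IRDA_DEBUG(level, ...) becomes pr_debug(), so the global irda_debug verbosity level drops out and the statement compiles to nothing unless DEBUG is defined for the file or CONFIG_DYNAMIC_DEBUG enables the call site at run time. The pattern, reduced to a before/after pair:

	/* before: printk guarded by the module-wide irda_debug level */
	IRDA_DEBUG(2, "%s(%p)\n", __func__, self);

	/* after: a no-op unless DEBUG or dynamic debug turns it on */
	pr_debug("%s(%p)\n", __func__, self);
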
-arnd */ if (!new->tsap) { - IRDA_DEBUG(0, "%s(), dup failed!\n", __func__); + pr_debug("%s(), dup failed!\n", __func__); kfree_skb(skb); goto out; } @@ -971,7 +959,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr, struct irda_sock *self = irda_sk(sk); int err; - IRDA_DEBUG(2, "%s(%p)\n", __func__, self); + pr_debug("%s(%p)\n", __func__, self); lock_sock(sk); /* Don't allow connect for Ultra sockets */ @@ -1007,13 +995,13 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr, /* Try to find one suitable */ err = irda_discover_daddr_and_lsap_sel(self, addr->sir_name); if (err) { - IRDA_DEBUG(0, "%s(), auto-connect failed!\n", __func__); + pr_debug("%s(), auto-connect failed!\n", __func__); goto out; } } else { /* Use the one provided by the user */ self->daddr = addr->sir_addr; - IRDA_DEBUG(1, "%s(), daddr = %08x\n", __func__, self->daddr); + pr_debug("%s(), daddr = %08x\n", __func__, self->daddr); /* If we don't have a valid service name, we assume the * user want to connect on a specific LSAP. Prevent @@ -1023,7 +1011,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr, /* Query remote LM-IAS using service name */ err = irda_find_lsap_sel(self, addr->sir_name); if (err) { - IRDA_DEBUG(0, "%s(), connect failed!\n", __func__); + pr_debug("%s(), connect failed!\n", __func__); goto out; } } else { @@ -1048,7 +1036,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr, self->saddr, self->daddr, NULL, self->max_sdu_size_rx, NULL); if (err) { - IRDA_DEBUG(0, "%s(), connect failed!\n", __func__); + pr_debug("%s(), connect failed!\n", __func__); goto out; } @@ -1064,8 +1052,6 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr, if (sk->sk_state != TCP_ESTABLISHED) { sock->state = SS_UNCONNECTED; - if (sk->sk_prot->disconnect(sk, flags)) - sock->state = SS_DISCONNECTING; err = sock_error(sk); if (!err) err = -ECONNRESET; @@ -1100,8 +1086,6 @@ static int irda_create(struct net *net, struct socket *sock, int protocol, struct sock *sk; struct irda_sock *self; - IRDA_DEBUG(2, "%s()\n", __func__); - if (net != &init_net) return -EAFNOSUPPORT; @@ -1121,7 +1105,7 @@ static int irda_create(struct net *net, struct socket *sock, int protocol, return -ENOMEM; self = irda_sk(sk); - IRDA_DEBUG(2, "%s() : self is %p\n", __func__, self); + pr_debug("%s() : self is %p\n", __func__, self); init_waitqueue_head(&self->query_wait); @@ -1183,7 +1167,7 @@ static int irda_create(struct net *net, struct socket *sock, int protocol, */ static void irda_destroy_socket(struct irda_sock *self) { - IRDA_DEBUG(2, "%s(%p)\n", __func__, self); + pr_debug("%s(%p)\n", __func__, self); /* Unregister with IrLMP */ irlmp_unregister_client(self->ckey); @@ -1220,8 +1204,6 @@ static int irda_release(struct socket *sock) { struct sock *sk = sock->sk; - IRDA_DEBUG(2, "%s()\n", __func__); - if (sk == NULL) return 0; @@ -1288,7 +1270,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb; int err = -EPIPE; - IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); + pr_debug("%s(), len=%zd\n", __func__, len); /* Note : socket.c set MSG_EOR on SEQPACKET sockets */ if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT | @@ -1324,8 +1306,8 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock, /* Check that we don't send out too big frames */ if (len > self->max_data_size) { - IRDA_DEBUG(2, "%s(), Chopping frame from %zd to %d bytes!\n", - __func__, len, self->max_data_size); + pr_debug("%s(), 
Chopping frame from %zd to %d bytes!\n", + __func__, len, self->max_data_size); len = self->max_data_size; } @@ -1349,7 +1331,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock, */ err = irttp_data_request(self->tsap, skb); if (err) { - IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); + pr_debug("%s(), err=%d\n", __func__, err); goto out_err; } @@ -1380,8 +1362,6 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, size_t copied; int err; - IRDA_DEBUG(4, "%s()\n", __func__); - skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb) @@ -1391,12 +1371,12 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, copied = skb->len; if (copied > size) { - IRDA_DEBUG(2, "%s(), Received truncated frame (%zd < %zd)!\n", - __func__, copied, size); + pr_debug("%s(), Received truncated frame (%zd < %zd)!\n", + __func__, copied, size); copied = size; msg->msg_flags |= MSG_TRUNC; } - skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + skb_copy_datagram_msg(skb, 0, msg, copied); skb_free_datagram(sk, skb); @@ -1408,7 +1388,7 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, */ if (self->rx_flow == FLOW_STOP) { if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) { - IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __func__); + pr_debug("%s(), Starting IrTTP\n", __func__); self->rx_flow = FLOW_START; irttp_flow_request(self->tsap, FLOW_START); } @@ -1430,8 +1410,6 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock, int target, err; long timeo; - IRDA_DEBUG(3, "%s()\n", __func__); - if ((err = sock_error(sk)) < 0) return err; @@ -1503,15 +1481,15 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock, /* put the skb back if we didn't use it up.. */ if (skb->len) { - IRDA_DEBUG(1, "%s(), back on q!\n", - __func__); + pr_debug("%s(), back on q!\n", + __func__); skb_queue_head(&sk->sk_receive_queue, skb); break; } kfree_skb(skb); } else { - IRDA_DEBUG(0, "%s() questionable!?\n", __func__); + pr_debug("%s() questionable!?\n", __func__); /* put message back and return */ skb_queue_head(&sk->sk_receive_queue, skb); @@ -1527,7 +1505,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock, */ if (self->rx_flow == FLOW_STOP) { if ((atomic_read(&sk->sk_rmem_alloc) << 2) <= sk->sk_rcvbuf) { - IRDA_DEBUG(2, "%s(), Starting IrTTP\n", __func__); + pr_debug("%s(), Starting IrTTP\n", __func__); self->rx_flow = FLOW_START; irttp_flow_request(self->tsap, FLOW_START); } @@ -1551,7 +1529,7 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb; int err; - IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); + pr_debug("%s(), len=%zd\n", __func__, len); if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) return -EINVAL; @@ -1575,9 +1553,8 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock, * service, so we have no fragmentation and no coalescence */ if (len > self->max_data_size) { - IRDA_DEBUG(0, "%s(), Warning to much data! " - "Chopping frame from %zd to %d bytes!\n", - __func__, len, self->max_data_size); + pr_debug("%s(), Warning too much data! 
Chopping frame from %zd to %d bytes!\n", + __func__, len, self->max_data_size); len = self->max_data_size; } @@ -1590,7 +1567,7 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock, skb_reserve(skb, self->max_header_size); skb_reset_transport_header(skb); - IRDA_DEBUG(4, "%s(), appending user data\n", __func__); + pr_debug("%s(), appending user data\n", __func__); skb_put(skb, len); err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); if (err) { @@ -1604,7 +1581,7 @@ static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock, */ err = irttp_udata_request(self->tsap, skb); if (err) { - IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); + pr_debug("%s(), err=%d\n", __func__, err); goto out; } @@ -1633,7 +1610,7 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb; int err; - IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); + pr_debug("%s(), len=%zd\n", __func__, len); err = -EINVAL; if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) @@ -1661,7 +1638,8 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, pid = addr->sir_lsap_sel; if (pid & 0x80) { - IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__); + pr_debug("%s(), extension in PID not supp!\n", + __func__); err = -EOPNOTSUPP; goto out; } @@ -1670,8 +1648,8 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, * port. Jean II */ if ((self->lsap == NULL) || (sk->sk_state != TCP_ESTABLISHED)) { - IRDA_DEBUG(0, "%s(), socket not bound to Ultra PID.\n", - __func__); + pr_debug("%s(), socket not bound to Ultra PID.\n", + __func__); err = -ENOTCONN; goto out; } @@ -1684,9 +1662,8 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, * service, so we have no fragmentation and no coalescence */ if (len > self->max_data_size) { - IRDA_DEBUG(0, "%s(), Warning to much data! " - "Chopping frame from %zd to %d bytes!\n", - __func__, len, self->max_data_size); + pr_debug("%s(), Warning too much data! Chopping frame from %zd to %d bytes!\n", + __func__, len, self->max_data_size); len = self->max_data_size; } @@ -1699,7 +1676,7 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, skb_reserve(skb, self->max_header_size); skb_reset_transport_header(skb); - IRDA_DEBUG(4, "%s(), appending user data\n", __func__); + pr_debug("%s(), appending user data\n", __func__); skb_put(skb, len); err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); if (err) { @@ -1710,7 +1687,7 @@ static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, err = irlmp_connless_data_request((bound ? self->lsap : NULL), skb, pid); if (err) - IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); + pr_debug("%s(), err=%d\n", __func__, err); out: release_sock(sk); return err ? 
: len; @@ -1725,7 +1702,7 @@ static int irda_shutdown(struct socket *sock, int how) struct sock *sk = sock->sk; struct irda_sock *self = irda_sk(sk); - IRDA_DEBUG(1, "%s(%p)\n", __func__, self); + pr_debug("%s(%p)\n", __func__, self); lock_sock(sk); @@ -1764,8 +1741,6 @@ static unsigned int irda_poll(struct file * file, struct socket *sock, struct irda_sock *self = irda_sk(sk); unsigned int mask; - IRDA_DEBUG(4, "%s()\n", __func__); - poll_wait(file, sk_sleep(sk), wait); mask = 0; @@ -1773,13 +1748,13 @@ static unsigned int irda_poll(struct file * file, struct socket *sock, if (sk->sk_err) mask |= POLLERR; if (sk->sk_shutdown & RCV_SHUTDOWN) { - IRDA_DEBUG(0, "%s(), POLLHUP\n", __func__); + pr_debug("%s(), POLLHUP\n", __func__); mask |= POLLHUP; } /* Readable? */ if (!skb_queue_empty(&sk->sk_receive_queue)) { - IRDA_DEBUG(4, "Socket is readable\n"); + pr_debug("Socket is readable\n"); mask |= POLLIN | POLLRDNORM; } @@ -1787,7 +1762,7 @@ static unsigned int irda_poll(struct file * file, struct socket *sock, switch (sk->sk_type) { case SOCK_STREAM: if (sk->sk_state == TCP_CLOSE) { - IRDA_DEBUG(0, "%s(), POLLHUP\n", __func__); + pr_debug("%s(), POLLHUP\n", __func__); mask |= POLLHUP; } @@ -1825,7 +1800,7 @@ static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) struct sock *sk = sock->sk; int err; - IRDA_DEBUG(4, "%s(), cmd=%#x\n", __func__, cmd); + pr_debug("%s(), cmd=%#x\n", __func__, cmd); err = -EINVAL; switch (cmd) { @@ -1866,7 +1841,7 @@ static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) case SIOCSIFMETRIC: break; default: - IRDA_DEBUG(1, "%s(), doing device ioctl!\n", __func__); + pr_debug("%s(), doing device ioctl!\n", __func__); err = -ENOIOCTLCMD; } @@ -1902,7 +1877,7 @@ static int irda_setsockopt(struct socket *sock, int level, int optname, struct ias_attrib * ias_attr; /* Attribute in IAS object */ int opt, free_ias = 0, err = 0; - IRDA_DEBUG(2, "%s(%p)\n", __func__, self); + pr_debug("%s(%p)\n", __func__, self); if (level != SOL_IRLMP) return -ENOPROTOOPT; @@ -2102,7 +2077,8 @@ static int irda_setsockopt(struct socket *sock, int level, int optname, /* Check is the user space own the object */ if(ias_attr->value->owner != IAS_USER_ATTR) { - IRDA_DEBUG(1, "%s(), attempting to delete a kernel attribute\n", __func__); + pr_debug("%s(), attempting to delete a kernel attribute\n", + __func__); kfree(ias_opt); err = -EPERM; goto out; @@ -2125,12 +2101,12 @@ static int irda_setsockopt(struct socket *sock, int level, int optname, /* Only possible for a seqpacket service (TTP with SAR) */ if (sk->sk_type != SOCK_SEQPACKET) { - IRDA_DEBUG(2, "%s(), setting max_sdu_size = %d\n", - __func__, opt); + pr_debug("%s(), setting max_sdu_size = %d\n", + __func__, opt); self->max_sdu_size_rx = opt; } else { - IRDA_WARNING("%s: not allowed to set MAXSDUSIZE for this socket type!\n", - __func__); + net_warn_ratelimited("%s: not allowed to set MAXSDUSIZE for this socket type!\n", + __func__); err = -ENOPROTOOPT; goto out; } @@ -2258,7 +2234,7 @@ static int irda_getsockopt(struct socket *sock, int level, int optname, int err = 0; int offset, total; - IRDA_DEBUG(2, "%s(%p)\n", __func__, self); + pr_debug("%s(%p)\n", __func__, self); if (level != SOL_IRLMP) return -ENOPROTOOPT; @@ -2441,8 +2417,8 @@ bed: /* Check that we can proceed with IAP */ if (self->iriap) { - IRDA_WARNING("%s: busy with a previous query\n", - __func__); + net_warn_ratelimited("%s: busy with a previous query\n", + __func__); kfree(ias_opt); err = -EBUSY; goto out; @@ -2546,7 
+2522,8 @@ bed: /* Wait until a node is discovered */ if (!self->cachedaddr) { - IRDA_DEBUG(1, "%s(), nothing discovered yet, going to sleep...\n", __func__); + pr_debug("%s(), nothing discovered yet, going to sleep...\n", + __func__); /* Set watchdog timer to expire in <val> ms. */ self->errno = 0; @@ -2562,14 +2539,14 @@ bed: /* If watchdog is still activated, kill it! */ del_timer(&(self->watchdog)); - IRDA_DEBUG(1, "%s(), ...waking up !\n", __func__); + pr_debug("%s(), ...waking up !\n", __func__); if (err != 0) goto out; } else - IRDA_DEBUG(1, "%s(), found immediately !\n", - __func__); + pr_debug("%s(), found immediately !\n", + __func__); /* Tell IrLMP that we have been notified */ irlmp_update_client(self->ckey, self->mask.word, diff --git a/net/irda/discovery.c b/net/irda/discovery.c index 6786e7f193d2..364d70aed068 100644 --- a/net/irda/discovery.c +++ b/net/irda/discovery.c @@ -112,8 +112,6 @@ void irlmp_add_discovery_log(hashbin_t *cachelog, hashbin_t *log) { discovery_t *discovery; - IRDA_DEBUG(4, "%s()\n", __func__); - /* * If log is missing this means that IrLAP was unable to perform the * discovery, so restart discovery again with just the half timeout @@ -159,8 +157,6 @@ void irlmp_expire_discoveries(hashbin_t *log, __u32 saddr, int force) int i = 0; /* How many we expired */ IRDA_ASSERT(log != NULL, return;); - IRDA_DEBUG(4, "%s()\n", __func__); - spin_lock_irqsave(&log->hb_spinlock, flags); discovery = (discovery_t *) hashbin_get_first(log); @@ -232,10 +228,10 @@ void irlmp_dump_discoveries(hashbin_t *log) discovery = (discovery_t *) hashbin_get_first(log); while (discovery != NULL) { - IRDA_DEBUG(0, "Discovery:\n"); - IRDA_DEBUG(0, " daddr=%08x\n", discovery->data.daddr); - IRDA_DEBUG(0, " saddr=%08x\n", discovery->data.saddr); - IRDA_DEBUG(0, " nickname=%s\n", discovery->data.info); + pr_debug("Discovery:\n"); + pr_debug(" daddr=%08x\n", discovery->data.daddr); + pr_debug(" saddr=%08x\n", discovery->data.saddr); + pr_debug(" nickname=%s\n", discovery->data.info); discovery = (discovery_t *) hashbin_get_next(log); } diff --git a/net/irda/ircomm/ircomm_core.c b/net/irda/ircomm/ircomm_core.c index 4490a675b1bb..3af219545f6d 100644 --- a/net/irda/ircomm/ircomm_core.c +++ b/net/irda/ircomm/ircomm_core.c @@ -69,7 +69,8 @@ static int __init ircomm_init(void) { ircomm = hashbin_new(HB_LOCK); if (ircomm == NULL) { - IRDA_ERROR("%s(), can't allocate hashbin!\n", __func__); + net_err_ratelimited("%s(), can't allocate hashbin!\n", + __func__); return -ENOMEM; } @@ -83,15 +84,13 @@ static int __init ircomm_init(void) } #endif /* CONFIG_PROC_FS */ - IRDA_MESSAGE("IrCOMM protocol (Dag Brattli)\n"); + net_info_ratelimited("IrCOMM protocol (Dag Brattli)\n"); return 0; } static void __exit ircomm_cleanup(void) { - IRDA_DEBUG(2, "%s()\n", __func__ ); - hashbin_delete(ircomm, (FREE_FUNC) __ircomm_close); #ifdef CONFIG_PROC_FS @@ -110,8 +109,8 @@ struct ircomm_cb *ircomm_open(notify_t *notify, __u8 service_type, int line) struct ircomm_cb *self = NULL; int ret; - IRDA_DEBUG(2, "%s(), service_type=0x%02x\n", __func__ , - service_type); + pr_debug("%s(), service_type=0x%02x\n", __func__ , + service_type); IRDA_ASSERT(ircomm != NULL, return NULL;); @@ -154,8 +153,6 @@ EXPORT_SYMBOL(ircomm_open); */ static int __ircomm_close(struct ircomm_cb *self) { - IRDA_DEBUG(2, "%s()\n", __func__ ); - /* Disconnect link if any */ ircomm_do_event(self, IRCOMM_DISCONNECT_REQUEST, NULL, NULL); @@ -190,8 +187,6 @@ int ircomm_close(struct ircomm_cb *self) IRDA_ASSERT(self != NULL, return -EIO;); 
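
The unconditional IRDA_ERROR/IRDA_WARNING/IRDA_MESSAGE printouts turn into the net_*_ratelimited() helpers, which wrap printk_ratelimited() so a misbehaving peer or an allocation storm cannot flood the kernel log. A minimal sketch of the error-path pattern, with an invented function wrapped around the hashbin check seen in ircomm_init():

#include <linux/errno.h>
#include <linux/net.h>		/* net_err_ratelimited() */

static int example_init(void *hashbin)
{
	if (!hashbin) {
		/* rate-limited: repeated failures collapse into a few lines */
		net_err_ratelimited("%s(), can't allocate hashbin!\n",
				    __func__);
		return -ENOMEM;
	}
	return 0;
}
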
IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EIO;); - IRDA_DEBUG(0, "%s()\n", __func__ ); - entry = hashbin_remove(ircomm, self->line, NULL); IRDA_ASSERT(entry == self, return -1;); @@ -215,8 +210,6 @@ int ircomm_connect_request(struct ircomm_cb *self, __u8 dlsap_sel, struct ircomm_info info; int ret; - IRDA_DEBUG(2 , "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); @@ -242,8 +235,6 @@ EXPORT_SYMBOL(ircomm_connect_request); void ircomm_connect_indication(struct ircomm_cb *self, struct sk_buff *skb, struct ircomm_info *info) { - IRDA_DEBUG(2, "%s()\n", __func__ ); - /* * If there are any data hiding in the control channel, we must * deliver it first. The side effect is that the control channel @@ -254,7 +245,7 @@ void ircomm_connect_indication(struct ircomm_cb *self, struct sk_buff *skb, info->qos, info->max_data_size, info->max_header_size, skb); else { - IRDA_DEBUG(0, "%s(), missing handler\n", __func__ ); + pr_debug("%s(), missing handler\n", __func__); } } @@ -271,8 +262,6 @@ int ircomm_connect_response(struct ircomm_cb *self, struct sk_buff *userdata) IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); - IRDA_DEBUG(4, "%s()\n", __func__ ); - ret = ircomm_do_event(self, IRCOMM_CONNECT_RESPONSE, userdata, NULL); return ret; @@ -289,15 +278,13 @@ EXPORT_SYMBOL(ircomm_connect_response); void ircomm_connect_confirm(struct ircomm_cb *self, struct sk_buff *skb, struct ircomm_info *info) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - if (self->notify.connect_confirm ) self->notify.connect_confirm(self->notify.instance, self, info->qos, info->max_data_size, info->max_header_size, skb); else { - IRDA_DEBUG(0, "%s(), missing handler\n", __func__ ); + pr_debug("%s(), missing handler\n", __func__); } } @@ -311,8 +298,6 @@ int ircomm_data_request(struct ircomm_cb *self, struct sk_buff *skb) { int ret; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -EFAULT;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EFAULT;); IRDA_ASSERT(skb != NULL, return -EFAULT;); @@ -332,14 +317,12 @@ EXPORT_SYMBOL(ircomm_data_request); */ void ircomm_data_indication(struct ircomm_cb *self, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(skb->len > 0, return;); if (self->notify.data_indication) self->notify.data_indication(self->notify.instance, self, skb); else { - IRDA_DEBUG(0, "%s(), missing handler\n", __func__ ); + pr_debug("%s(), missing handler\n", __func__); } } @@ -364,8 +347,8 @@ void ircomm_process_data(struct ircomm_cb *self, struct sk_buff *skb) * fine */ if (unlikely(skb->len < (clen + 1))) { - IRDA_DEBUG(2, "%s() throwing away illegal frame\n", - __func__ ); + pr_debug("%s() throwing away illegal frame\n", + __func__); return; } @@ -383,8 +366,8 @@ void ircomm_process_data(struct ircomm_cb *self, struct sk_buff *skb) if (skb->len) ircomm_data_indication(self, skb); else { - IRDA_DEBUG(4, "%s(), data was control info only!\n", - __func__ ); + pr_debug("%s(), data was control info only!\n", + __func__); } } @@ -398,8 +381,6 @@ int ircomm_control_request(struct ircomm_cb *self, struct sk_buff *skb) { int ret; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -EFAULT;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -EFAULT;); IRDA_ASSERT(skb != NULL, return -EFAULT;); @@ -420,8 +401,6 @@ EXPORT_SYMBOL(ircomm_control_request); static void ircomm_control_indication(struct ircomm_cb *self, struct sk_buff *skb, int clen) { - 
IRDA_DEBUG(2, "%s()\n", __func__ ); - /* Use udata for delivering data on the control channel */ if (self->notify.udata_indication) { struct sk_buff *ctrl_skb; @@ -441,7 +420,7 @@ static void ircomm_control_indication(struct ircomm_cb *self, * see ircomm_tty_control_indication(). */ dev_kfree_skb(ctrl_skb); } else { - IRDA_DEBUG(0, "%s(), missing handler\n", __func__ ); + pr_debug("%s(), missing handler\n", __func__); } } @@ -456,8 +435,6 @@ int ircomm_disconnect_request(struct ircomm_cb *self, struct sk_buff *userdata) struct ircomm_info info; int ret; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); @@ -477,15 +454,13 @@ EXPORT_SYMBOL(ircomm_disconnect_request); void ircomm_disconnect_indication(struct ircomm_cb *self, struct sk_buff *skb, struct ircomm_info *info) { - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(info != NULL, return;); if (self->notify.disconnect_indication) { self->notify.disconnect_indication(self->notify.instance, self, info->reason, skb); } else { - IRDA_DEBUG(0, "%s(), missing handler\n", __func__ ); + pr_debug("%s(), missing handler\n", __func__); } } @@ -497,8 +472,6 @@ void ircomm_disconnect_indication(struct ircomm_cb *self, struct sk_buff *skb, */ void ircomm_flow_request(struct ircomm_cb *self, LOCAL_FLOW flow) { - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); diff --git a/net/irda/ircomm/ircomm_event.c b/net/irda/ircomm/ircomm_event.c index b172c6522328..b0730ac9f388 100644 --- a/net/irda/ircomm/ircomm_event.c +++ b/net/irda/ircomm/ircomm_event.c @@ -54,8 +54,7 @@ const char *const ircomm_state[] = { "IRCOMM_CONN", }; -#ifdef CONFIG_IRDA_DEBUG -static const char *const ircomm_event[] = { +static const char *const ircomm_event[] __maybe_unused = { "IRCOMM_CONNECT_REQUEST", "IRCOMM_CONNECT_RESPONSE", "IRCOMM_TTP_CONNECT_INDICATION", @@ -73,7 +72,6 @@ static const char *const ircomm_event[] = { "IRCOMM_CONTROL_REQUEST", "IRCOMM_CONTROL_INDICATION", }; -#endif /* CONFIG_IRDA_DEBUG */ static int (*state[])(struct ircomm_cb *self, IRCOMM_EVENT event, struct sk_buff *skb, struct ircomm_info *info) = @@ -106,8 +104,8 @@ static int ircomm_state_idle(struct ircomm_cb *self, IRCOMM_EVENT event, ircomm_connect_indication(self, skb, info); break; default: - IRDA_DEBUG(4, "%s(), unknown event: %s\n", __func__ , - ircomm_event[event]); + pr_debug("%s(), unknown event: %s\n", __func__ , + ircomm_event[event]); ret = -EINVAL; } return ret; @@ -136,8 +134,8 @@ static int ircomm_state_waiti(struct ircomm_cb *self, IRCOMM_EVENT event, ircomm_disconnect_indication(self, skb, info); break; default: - IRDA_DEBUG(0, "%s(), unknown event: %s\n", __func__ , - ircomm_event[event]); + pr_debug("%s(), unknown event: %s\n", __func__ , + ircomm_event[event]); ret = -EINVAL; } return ret; @@ -169,8 +167,8 @@ static int ircomm_state_waitr(struct ircomm_cb *self, IRCOMM_EVENT event, ircomm_disconnect_indication(self, skb, info); break; default: - IRDA_DEBUG(0, "%s(), unknown event = %s\n", __func__ , - ircomm_event[event]); + pr_debug("%s(), unknown event = %s\n", __func__ , + ircomm_event[event]); ret = -EINVAL; } return ret; @@ -211,8 +209,8 @@ static int ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event, ret = self->issue.disconnect_request(self, skb, info); break; default: - IRDA_DEBUG(0, "%s(), unknown event = %s\n", __func__ , - ircomm_event[event]); + pr_debug("%s(), unknown event = %s\n", __func__ , + 
ircomm_event[event]); ret = -EINVAL; } return ret; @@ -227,8 +225,8 @@ static int ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event, int ircomm_do_event(struct ircomm_cb *self, IRCOMM_EVENT event, struct sk_buff *skb, struct ircomm_info *info) { - IRDA_DEBUG(4, "%s: state=%s, event=%s\n", __func__ , - ircomm_state[self->state], ircomm_event[event]); + pr_debug("%s: state=%s, event=%s\n", __func__ , + ircomm_state[self->state], ircomm_event[event]); return (*state[self->state])(self, event, skb, info); } @@ -243,6 +241,6 @@ void ircomm_next_state(struct ircomm_cb *self, IRCOMM_STATE state) { self->state = state; - IRDA_DEBUG(4, "%s: next state=%s, service type=%d\n", __func__ , - ircomm_state[self->state], self->service_type); + pr_debug("%s: next state=%s, service type=%d\n", __func__ , + ircomm_state[self->state], self->service_type); } diff --git a/net/irda/ircomm/ircomm_lmp.c b/net/irda/ircomm/ircomm_lmp.c index 6536114adf37..e4cc847bb933 100644 --- a/net/irda/ircomm/ircomm_lmp.c +++ b/net/irda/ircomm/ircomm_lmp.c @@ -52,8 +52,6 @@ static int ircomm_lmp_connect_request(struct ircomm_cb *self, { int ret = 0; - IRDA_DEBUG(0, "%s()\n", __func__ ); - /* Don't forget to refcount it - should be NULL anyway */ if(userdata) skb_get(userdata); @@ -74,8 +72,6 @@ static int ircomm_lmp_connect_response(struct ircomm_cb *self, { struct sk_buff *tx_skb; - IRDA_DEBUG(0, "%s()\n", __func__ ); - /* Any userdata supplied? */ if (userdata == NULL) { tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC); @@ -107,8 +103,6 @@ static int ircomm_lmp_disconnect_request(struct ircomm_cb *self, struct sk_buff *tx_skb; int ret; - IRDA_DEBUG(0, "%s()\n", __func__ ); - if (!userdata) { tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC); if (!tx_skb) @@ -144,13 +138,11 @@ static void ircomm_lmp_flow_control(struct sk_buff *skb) cb = (struct irda_skb_cb *) skb->cb; - IRDA_DEBUG(2, "%s()\n", __func__ ); - line = cb->line; self = (struct ircomm_cb *) hashbin_lock_find(ircomm, line, NULL); if (!self) { - IRDA_DEBUG(2, "%s(), didn't find myself\n", __func__ ); + pr_debug("%s(), didn't find myself\n", __func__); return; } @@ -160,7 +152,7 @@ static void ircomm_lmp_flow_control(struct sk_buff *skb) self->pkt_count--; if ((self->pkt_count < 2) && (self->flow_status == FLOW_STOP)) { - IRDA_DEBUG(2, "%s(), asking TTY to start again!\n", __func__ ); + pr_debug("%s(), asking TTY to start again!\n", __func__); self->flow_status = FLOW_START; if (self->notify.flow_indication) self->notify.flow_indication(self->notify.instance, @@ -187,7 +179,7 @@ static int ircomm_lmp_data_request(struct ircomm_cb *self, cb->line = self->line; - IRDA_DEBUG(4, "%s(), sending frame\n", __func__ ); + pr_debug("%s(), sending frame\n", __func__); /* Don't forget to refcount it - see ircomm_tty_do_softint() */ skb_get(skb); @@ -196,7 +188,7 @@ static int ircomm_lmp_data_request(struct ircomm_cb *self, skb->destructor = ircomm_lmp_flow_control; if ((self->pkt_count++ > 7) && (self->flow_status == FLOW_START)) { - IRDA_DEBUG(2, "%s(), asking TTY to slow down!\n", __func__ ); + pr_debug("%s(), asking TTY to slow down!\n", __func__); self->flow_status = FLOW_STOP; if (self->notify.flow_indication) self->notify.flow_indication(self->notify.instance, @@ -204,7 +196,7 @@ static int ircomm_lmp_data_request(struct ircomm_cb *self, } ret = irlmp_data_request(self->lsap, skb); if (ret) { - IRDA_ERROR("%s(), failed\n", __func__); + net_err_ratelimited("%s(), failed\n", __func__); /* irlmp_data_request already free the packet */ } @@ -222,8 +214,6 @@ static int 
ircomm_lmp_data_indication(void *instance, void *sap, { struct ircomm_cb *self = (struct ircomm_cb *) instance; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); IRDA_ASSERT(skb != NULL, return -1;); @@ -252,8 +242,6 @@ static void ircomm_lmp_connect_confirm(void *instance, void *sap, struct ircomm_cb *self = (struct ircomm_cb *) instance; struct ircomm_info info; - IRDA_DEBUG(0, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); @@ -285,8 +273,6 @@ static void ircomm_lmp_connect_indication(void *instance, void *sap, struct ircomm_cb *self = (struct ircomm_cb *)instance; struct ircomm_info info; - IRDA_DEBUG(0, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); @@ -315,8 +301,6 @@ static void ircomm_lmp_disconnect_indication(void *instance, void *sap, struct ircomm_cb *self = (struct ircomm_cb *) instance; struct ircomm_info info; - IRDA_DEBUG(0, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); @@ -338,8 +322,6 @@ int ircomm_open_lsap(struct ircomm_cb *self) { notify_t notify; - IRDA_DEBUG(0, "%s()\n", __func__ ); - /* Register callbacks */ irda_notify_init(&notify); notify.data_indication = ircomm_lmp_data_indication; @@ -351,7 +333,7 @@ int ircomm_open_lsap(struct ircomm_cb *self) self->lsap = irlmp_open_lsap(LSAP_ANY, &notify, 0); if (!self->lsap) { - IRDA_DEBUG(0,"%sfailed to allocate tsap\n", __func__ ); + pr_debug("%sfailed to allocate tsap\n", __func__); return -1; } self->slsap_sel = self->lsap->slsap_sel; diff --git a/net/irda/ircomm/ircomm_param.c b/net/irda/ircomm/ircomm_param.c index f80b1a6a244b..27be782be7e7 100644 --- a/net/irda/ircomm/ircomm_param.c +++ b/net/irda/ircomm/ircomm_param.c @@ -101,8 +101,6 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush) struct sk_buff *skb; int count; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); @@ -130,7 +128,8 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush) count = irda_param_insert(self, pi, skb_tail_pointer(skb), skb_tailroom(skb), &ircomm_param_info); if (count < 0) { - IRDA_WARNING("%s(), no room for parameter!\n", __func__); + net_warn_ratelimited("%s(), no room for parameter!\n", + __func__); spin_unlock_irqrestore(&self->spinlock, flags); return -1; } @@ -138,7 +137,7 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush) spin_unlock_irqrestore(&self->spinlock, flags); - IRDA_DEBUG(2, "%s(), skb->len=%d\n", __func__ , skb->len); + pr_debug("%s(), skb->len=%d\n", __func__ , skb->len); if (flush) { /* ircomm_tty_do_softint will take care of the rest */ @@ -172,12 +171,11 @@ static int ircomm_param_service_type(void *instance, irda_param_t *param, /* Find all common service types */ service_type &= self->service_type; if (!service_type) { - IRDA_DEBUG(2, - "%s(), No common service type to use!\n", __func__ ); + pr_debug("%s(), No common service type to use!\n", __func__); return -1; } - IRDA_DEBUG(0, "%s(), services in common=%02x\n", __func__ , - service_type); + pr_debug("%s(), services in common=%02x\n", __func__ , + service_type); /* * Now choose a preferred service type of those available @@ -191,8 +189,8 @@ static int 
ircomm_param_service_type(void *instance, irda_param_t *param, else if (service_type & IRCOMM_3_WIRE_RAW) self->settings.service_type = IRCOMM_3_WIRE_RAW; - IRDA_DEBUG(0, "%s(), resulting service type=0x%02x\n", __func__ , - self->settings.service_type); + pr_debug("%s(), resulting service type=0x%02x\n", __func__ , + self->settings.service_type); /* * Now the line is ready for some communication. Check if we are a @@ -234,8 +232,8 @@ static int ircomm_param_port_type(void *instance, irda_param_t *param, int get) else { self->settings.port_type = (__u8) param->pv.i; - IRDA_DEBUG(0, "%s(), port type=%d\n", __func__ , - self->settings.port_type); + pr_debug("%s(), port type=%d\n", __func__ , + self->settings.port_type); } return 0; } @@ -254,9 +252,9 @@ static int ircomm_param_port_name(void *instance, irda_param_t *param, int get) IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); if (get) { - IRDA_DEBUG(0, "%s(), not imp!\n", __func__ ); + pr_debug("%s(), not imp!\n", __func__); } else { - IRDA_DEBUG(0, "%s(), port-name=%s\n", __func__ , param->pv.c); + pr_debug("%s(), port-name=%s\n", __func__ , param->pv.c); strncpy(self->settings.port_name, param->pv.c, 32); } @@ -281,7 +279,7 @@ static int ircomm_param_data_rate(void *instance, irda_param_t *param, int get) else self->settings.data_rate = param->pv.i; - IRDA_DEBUG(2, "%s(), data rate = %d\n", __func__ , param->pv.i); + pr_debug("%s(), data rate = %d\n", __func__ , param->pv.i); return 0; } @@ -327,7 +325,7 @@ static int ircomm_param_flow_control(void *instance, irda_param_t *param, else self->settings.flow_control = (__u8) param->pv.i; - IRDA_DEBUG(1, "%s(), flow control = 0x%02x\n", __func__ , (__u8) param->pv.i); + pr_debug("%s(), flow control = 0x%02x\n", __func__ , (__u8)param->pv.i); return 0; } @@ -353,8 +351,8 @@ static int ircomm_param_xon_xoff(void *instance, irda_param_t *param, int get) self->settings.xonxoff[1] = (__u16) param->pv.i >> 8; } - IRDA_DEBUG(0, "%s(), XON/XOFF = 0x%02x,0x%02x\n", __func__ , - param->pv.i & 0xff, param->pv.i >> 8); + pr_debug("%s(), XON/XOFF = 0x%02x,0x%02x\n", __func__ , + param->pv.i & 0xff, param->pv.i >> 8); return 0; } @@ -380,8 +378,8 @@ static int ircomm_param_enq_ack(void *instance, irda_param_t *param, int get) self->settings.enqack[1] = (__u16) param->pv.i >> 8; } - IRDA_DEBUG(0, "%s(), ENQ/ACK = 0x%02x,0x%02x\n", __func__ , - param->pv.i & 0xff, param->pv.i >> 8); + pr_debug("%s(), ENQ/ACK = 0x%02x,0x%02x\n", __func__ , + param->pv.i & 0xff, param->pv.i >> 8); return 0; } @@ -395,7 +393,7 @@ static int ircomm_param_enq_ack(void *instance, irda_param_t *param, int get) static int ircomm_param_line_status(void *instance, irda_param_t *param, int get) { - IRDA_DEBUG(2, "%s(), not impl.\n", __func__ ); + pr_debug("%s(), not impl.\n", __func__); return 0; } @@ -456,7 +454,7 @@ static int ircomm_param_dce(void *instance, irda_param_t *param, int get) struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; __u8 dce; - IRDA_DEBUG(1, "%s(), dce = 0x%02x\n", __func__ , (__u8) param->pv.i); + pr_debug("%s(), dce = 0x%02x\n", __func__ , (__u8)param->pv.i); dce = (__u8) param->pv.i; @@ -468,7 +466,7 @@ static int ircomm_param_dce(void *instance, irda_param_t *param, int get) /* Check if any of the settings have changed */ if (dce & 0x0f) { if (dce & IRCOMM_DELTA_CTS) { - IRDA_DEBUG(2, "%s(), CTS\n", __func__ ); + pr_debug("%s(), CTS\n", __func__); } } diff --git a/net/irda/ircomm/ircomm_ttp.c b/net/irda/ircomm/ircomm_ttp.c index d362d711b79c..4b81e0934770 100644 --- 
a/net/irda/ircomm/ircomm_ttp.c +++ b/net/irda/ircomm/ircomm_ttp.c @@ -76,8 +76,6 @@ int ircomm_open_tsap(struct ircomm_cb *self) { notify_t notify; - IRDA_DEBUG(4, "%s()\n", __func__ ); - /* Register callbacks */ irda_notify_init(&notify); notify.data_indication = ircomm_ttp_data_indication; @@ -91,7 +89,7 @@ int ircomm_open_tsap(struct ircomm_cb *self) self->tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, &notify); if (!self->tsap) { - IRDA_DEBUG(0, "%sfailed to allocate tsap\n", __func__ ); + pr_debug("%sfailed to allocate tsap\n", __func__); return -1; } self->slsap_sel = self->tsap->stsap_sel; @@ -119,8 +117,6 @@ static int ircomm_ttp_connect_request(struct ircomm_cb *self, { int ret = 0; - IRDA_DEBUG(4, "%s()\n", __func__ ); - /* Don't forget to refcount it - should be NULL anyway */ if(userdata) skb_get(userdata); @@ -143,8 +139,6 @@ static int ircomm_ttp_connect_response(struct ircomm_cb *self, { int ret; - IRDA_DEBUG(4, "%s()\n", __func__ ); - /* Don't forget to refcount it - should be NULL anyway */ if(userdata) skb_get(userdata); @@ -171,7 +165,7 @@ static int ircomm_ttp_data_request(struct ircomm_cb *self, IRDA_ASSERT(skb != NULL, return -1;); - IRDA_DEBUG(2, "%s(), clen=%d\n", __func__ , clen); + pr_debug("%s(), clen=%d\n", __func__ , clen); /* * Insert clen field, currently we either send data only, or control @@ -188,7 +182,7 @@ static int ircomm_ttp_data_request(struct ircomm_cb *self, ret = irttp_data_request(self->tsap, skb); if (ret) { - IRDA_ERROR("%s(), failed\n", __func__); + net_err_ratelimited("%s(), failed\n", __func__); /* irttp_data_request already free the packet */ } @@ -206,8 +200,6 @@ static int ircomm_ttp_data_indication(void *instance, void *sap, { struct ircomm_cb *self = (struct ircomm_cb *) instance; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return -1;); IRDA_ASSERT(skb != NULL, return -1;); @@ -229,16 +221,14 @@ static void ircomm_ttp_connect_confirm(void *instance, void *sap, struct ircomm_cb *self = (struct ircomm_cb *) instance; struct ircomm_info info; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); IRDA_ASSERT(qos != NULL, goto out;); if (max_sdu_size != TTP_SAR_DISABLE) { - IRDA_ERROR("%s(), SAR not allowed for IrCOMM!\n", - __func__); + net_err_ratelimited("%s(), SAR not allowed for IrCOMM!\n", + __func__); goto out; } @@ -270,16 +260,14 @@ static void ircomm_ttp_connect_indication(void *instance, void *sap, struct ircomm_cb *self = (struct ircomm_cb *)instance; struct ircomm_info info; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); IRDA_ASSERT(qos != NULL, goto out;); if (max_sdu_size != TTP_SAR_DISABLE) { - IRDA_ERROR("%s(), SAR not allowed for IrCOMM!\n", - __func__); + net_err_ratelimited("%s(), SAR not allowed for IrCOMM!\n", + __func__); goto out; } @@ -329,8 +317,6 @@ static void ircomm_ttp_disconnect_indication(void *instance, void *sap, struct ircomm_cb *self = (struct ircomm_cb *) instance; struct ircomm_info info; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); @@ -354,8 +340,6 @@ static void ircomm_ttp_flow_indication(void *instance, void *sap, { struct ircomm_cb *self = (struct ircomm_cb *) instance; - IRDA_DEBUG(4, "%s()\n", __func__ ); - 
IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_MAGIC, return;); diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index 61ceb4cdb4a2..40695b9751c1 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c @@ -147,7 +147,8 @@ static int __init ircomm_tty_init(void) return -ENOMEM; ircomm_tty = hashbin_new(HB_LOCK); if (ircomm_tty == NULL) { - IRDA_ERROR("%s(), can't allocate hashbin!\n", __func__); + net_err_ratelimited("%s(), can't allocate hashbin!\n", + __func__); put_tty_driver(driver); return -ENOMEM; } @@ -163,8 +164,8 @@ static int __init ircomm_tty_init(void) driver->flags = TTY_DRIVER_REAL_RAW; tty_set_operations(driver, &ops); if (tty_register_driver(driver)) { - IRDA_ERROR("%s(): Couldn't register serial driver\n", - __func__); + net_err_ratelimited("%s(): Couldn't register serial driver\n", + __func__); put_tty_driver(driver); return -1; } @@ -173,8 +174,6 @@ static int __init ircomm_tty_init(void) static void __exit __ircomm_tty_cleanup(struct ircomm_tty_cb *self) { - IRDA_DEBUG(0, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -195,12 +194,10 @@ static void __exit ircomm_tty_cleanup(void) { int ret; - IRDA_DEBUG(4, "%s()\n", __func__ ); - ret = tty_unregister_driver(driver); if (ret) { - IRDA_ERROR("%s(), failed to unregister driver\n", - __func__); + net_err_ratelimited("%s(), failed to unregister driver\n", + __func__); return; } @@ -219,14 +216,12 @@ static int ircomm_tty_startup(struct ircomm_tty_cb *self) notify_t notify; int ret = -ENODEV; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); /* Check if already open */ if (test_and_set_bit(ASYNCB_INITIALIZED, &self->port.flags)) { - IRDA_DEBUG(2, "%s(), already open so break out!\n", __func__ ); + pr_debug("%s(), already open so break out!\n", __func__); return 0; } @@ -256,7 +251,7 @@ static int ircomm_tty_startup(struct ircomm_tty_cb *self) /* Connect IrCOMM link with remote device */ ret = ircomm_tty_attach_cable(self); if (ret < 0) { - IRDA_ERROR("%s(), error attaching cable!\n", __func__); + net_err_ratelimited("%s(), error attaching cable!\n", __func__); goto err; } @@ -281,8 +276,6 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, int do_clocal = 0; unsigned long flags; - IRDA_DEBUG(2, "%s()\n", __func__ ); - /* * If non-blocking mode is set, or the port is not enabled, * then make the check up front and then exit. 
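The hunks above and below all follow one pattern: unconditional, subsystem-private trace macros (IRDA_DEBUG at any verbosity level) become pr_debug(), which compiles away unless DEBUG or dynamic debug is enabled; warning and error paths become the ratelimited net helpers, so failures that a remote peer can trigger cannot flood the log; and pure function-entry traces such as IRDA_DEBUG(4, "%s()\n", __func__) are dropped outright rather than converted, presumably because the function tracer already covers that use. A minimal sketch of the same mapping, with hypothetical my_* names that are not part of this patch:

/*
 * Illustrative sketch only -- the my_* identifiers are invented for
 * this example. It contrasts the old always-compiled-in, level-gated
 * macro with the generic helpers the patch switches to.
 */
#include <linux/printk.h>
#include <linux/net.h>		/* net_warn_ratelimited(), net_err_ratelimited() */
#include <linux/errno.h>

static int my_debug_level;	/* old-style runtime verbosity knob */

/* Old style: always built in, gated only by a runtime level check. */
#define MY_DEBUG(n, fmt, ...)					\
	do {							\
		if (my_debug_level >= (n))			\
			printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
	} while (0)

static bool my_resource_available(void)
{
	return true;		/* stub so the example is self-contained */
}

/* New style: pr_debug() costs nothing unless debugging is enabled,
 * and error paths are ratelimited instead of unconditional. */
static int my_setup(void)
{
	pr_debug("%s()\n", __func__);

	if (!my_resource_available()) {
		net_err_ratelimited("%s(), failed to allocate resource\n",
				    __func__);
		return -ENOMEM;
	}
	return 0;
}

With CONFIG_DYNAMIC_DEBUG the converted call sites can be toggled at runtime, e.g. echo 'file ircomm_tty.c +p' > /sys/kernel/debug/dynamic_debug/control, which is why the per-call verbosity level (the first IRDA_DEBUG argument) can be dropped. Likewise, the event-name string arrays that used to live under #ifdef CONFIG_IRDA_DEBUG are now always built and marked __maybe_unused, keeping unused-variable warnings quiet when every reference to them is compiled out.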
@@ -297,12 +290,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, if (tty->termios.c_cflag & CBAUD) tty_port_raise_dtr_rts(port); port->flags |= ASYNC_NORMAL_ACTIVE; - IRDA_DEBUG(1, "%s(), O_NONBLOCK requested!\n", __func__ ); + pr_debug("%s(), O_NONBLOCK requested!\n", __func__); return 0; } if (tty->termios.c_cflag & CLOCAL) { - IRDA_DEBUG(1, "%s(), doing CLOCAL!\n", __func__ ); + pr_debug("%s(), doing CLOCAL!\n", __func__); do_clocal = 1; } @@ -316,8 +309,8 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, retval = 0; add_wait_queue(&port->open_wait, &wait); - IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n", - __FILE__, __LINE__, tty->driver->name, port->count); + pr_debug("%s(%d):block_til_ready before block on %s open_count=%d\n", + __FILE__, __LINE__, tty->driver->name, port->count); spin_lock_irqsave(&port->lock, flags); port->count--; @@ -354,8 +347,8 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, break; } - IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n", - __FILE__, __LINE__, tty->driver->name, port->count); + pr_debug("%s(%d):block_til_ready blocking on %s open_count=%d\n", + __FILE__, __LINE__, tty->driver->name, port->count); schedule(); } @@ -369,8 +362,8 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self, port->blocked_open--; spin_unlock_irqrestore(&port->lock, flags); - IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n", - __FILE__, __LINE__, tty->driver->name, port->count); + pr_debug("%s(%d):block_til_ready after blocking on %s open_count=%d\n", + __FILE__, __LINE__, tty->driver->name, port->count); if (!retval) port->flags |= ASYNC_NORMAL_ACTIVE; @@ -389,10 +382,8 @@ static int ircomm_tty_install(struct tty_driver *driver, struct tty_struct *tty) if (!self) { /* No, so make new instance */ self = kzalloc(sizeof(struct ircomm_tty_cb), GFP_KERNEL); - if (self == NULL) { - IRDA_ERROR("%s(), kmalloc failed!\n", __func__); + if (self == NULL) return -ENOMEM; - } tty_port_init(&self->port); self->port.ops = &ircomm_port_ops; @@ -440,16 +431,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) unsigned long flags; int ret; - IRDA_DEBUG(2, "%s()\n", __func__ ); - /* ++ is not atomic, so this should be protected - Jean II */ spin_lock_irqsave(&self->port.lock, flags); self->port.count++; spin_unlock_irqrestore(&self->port.lock, flags); tty_port_tty_set(&self->port, tty); - IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name, - self->line, self->port.count); + pr_debug("%s(), %s%d, count = %d\n", __func__ , tty->driver->name, + self->line, self->port.count); /* Not really used by us, but lets do it anyway */ self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 
1 : 0; @@ -469,8 +458,8 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) if (wait_event_interruptible(self->port.close_wait, !test_bit(ASYNCB_CLOSING, &self->port.flags))) { - IRDA_WARNING("%s - got signal while blocking on ASYNC_CLOSING!\n", - __func__); + net_warn_ratelimited("%s - got signal while blocking on ASYNC_CLOSING!\n", + __func__); return -ERESTARTSYS; } @@ -488,9 +477,9 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) self->settings.service_type = IRCOMM_9_WIRE; /* 9 wire as default */ /* Jan Kiszka -> add DSR/RI -> Conform to IrCOMM spec */ self->settings.dce = IRCOMM_CTS | IRCOMM_CD | IRCOMM_DSR | IRCOMM_RI; /* Default line settings */ - IRDA_DEBUG(2, "%s(), IrCOMM device\n", __func__ ); + pr_debug("%s(), IrCOMM device\n", __func__); } else { - IRDA_DEBUG(2, "%s(), IrLPT device\n", __func__ ); + pr_debug("%s(), IrLPT device\n", __func__); self->service_type = IRCOMM_3_WIRE_RAW; self->settings.service_type = IRCOMM_3_WIRE_RAW; /* Default */ } @@ -501,9 +490,8 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) ret = ircomm_tty_block_til_ready(self, tty, filp); if (ret) { - IRDA_DEBUG(2, - "%s(), returning after block_til_ready with %d\n", __func__ , - ret); + pr_debug("%s(), returning after block_til_ready with %d\n", + __func__, ret); return ret; } @@ -521,8 +509,6 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp) struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; struct tty_port *port = &self->port; - IRDA_DEBUG(0, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -572,8 +558,6 @@ static void ircomm_tty_do_softint(struct work_struct *work) unsigned long flags; struct sk_buff *skb, *ctrl_skb; - IRDA_DEBUG(2, "%s()\n", __func__ ); - if (!self || self->magic != IRCOMM_TTY_MAGIC) return; @@ -639,8 +623,8 @@ static int ircomm_tty_write(struct tty_struct *tty, int len = 0; int size; - IRDA_DEBUG(2, "%s(), count=%d, hw_stopped=%d\n", __func__ , count, - tty->hw_stopped); + pr_debug("%s(), count=%d, hw_stopped=%d\n", __func__ , count, + tty->hw_stopped); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); @@ -662,7 +646,7 @@ static int ircomm_tty_write(struct tty_struct *tty, * we don't mess up the original "safe skb" (see tx_data_size). 
* Jean II */ if (self->max_header_size == IRCOMM_TTY_HDR_UNINITIALISED) { - IRDA_DEBUG(1, "%s() : not initialised\n", __func__); + pr_debug("%s() : not initialised\n", __func__); #ifdef IRCOMM_NO_TX_BEFORE_INIT /* We didn't consume anything, TTY will retry */ return 0; @@ -791,7 +775,7 @@ static int ircomm_tty_write_room(struct tty_struct *tty) ret = self->max_data_size; spin_unlock_irqrestore(&self->spinlock, flags); } - IRDA_DEBUG(2, "%s(), ret=%d\n", __func__ , ret); + pr_debug("%s(), ret=%d\n", __func__ , ret); return ret; } @@ -808,8 +792,6 @@ static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout) unsigned long orig_jiffies, poll_time; unsigned long flags; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -843,8 +825,6 @@ static void ircomm_tty_throttle(struct tty_struct *tty) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -874,8 +854,6 @@ static void ircomm_tty_unthrottle(struct tty_struct *tty) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -889,7 +867,7 @@ static void ircomm_tty_unthrottle(struct tty_struct *tty) self->settings.dte |= (IRCOMM_RTS|IRCOMM_DELTA_RTS); ircomm_param_request(self, IRCOMM_DTE, TRUE); - IRDA_DEBUG(1, "%s(), FLOW_START\n", __func__ ); + pr_debug("%s(), FLOW_START\n", __func__); } ircomm_flow_request(self->ircomm, FLOW_START); } @@ -926,8 +904,6 @@ static void ircomm_tty_shutdown(struct ircomm_tty_cb *self) IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); - IRDA_DEBUG(0, "%s()\n", __func__ ); - if (!test_and_clear_bit(ASYNCB_INITIALIZED, &self->port.flags)) return; @@ -970,8 +946,6 @@ static void ircomm_tty_hangup(struct tty_struct *tty) struct tty_port *port = &self->port; unsigned long flags; - IRDA_DEBUG(0, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -999,7 +973,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty) */ static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch) { - IRDA_DEBUG(0, "%s(), not impl\n", __func__ ); + pr_debug("%s(), not impl\n", __func__); } /* @@ -1043,8 +1017,6 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self) struct tty_struct *tty; int status; - IRDA_DEBUG(0, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -1056,15 +1028,13 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self) /*wake_up_interruptible(&self->delta_msr_wait);*/ } if ((self->port.flags & ASYNC_CHECK_CD) && (status & IRCOMM_DELTA_CD)) { - IRDA_DEBUG(2, - "%s(), ircomm%d CD now %s...\n", __func__ , self->line, - (status & IRCOMM_CD) ? "on" : "off"); + pr_debug("%s(), ircomm%d CD now %s...\n", __func__ , self->line, + (status & IRCOMM_CD) ? 
"on" : "off"); if (status & IRCOMM_CD) { wake_up_interruptible(&self->port.open_wait); } else { - IRDA_DEBUG(2, - "%s(), Doing serial hangup..\n", __func__ ); + pr_debug("%s(), Doing serial hangup..\n", __func__); if (tty) tty_hangup(tty); @@ -1075,8 +1045,7 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self) if (tty && tty_port_cts_enabled(&self->port)) { if (tty->hw_stopped) { if (status & IRCOMM_CTS) { - IRDA_DEBUG(2, - "%s(), CTS tx start...\n", __func__ ); + pr_debug("%s(), CTS tx start...\n", __func__); tty->hw_stopped = 0; /* Wake up processes blocked on open */ @@ -1087,8 +1056,7 @@ void ircomm_tty_check_modem_status(struct ircomm_tty_cb *self) } } else { if (!(status & IRCOMM_CTS)) { - IRDA_DEBUG(2, - "%s(), CTS tx stop...\n", __func__ ); + pr_debug("%s(), CTS tx stop...\n", __func__); tty->hw_stopped = 1; } } @@ -1109,15 +1077,13 @@ static int ircomm_tty_data_indication(void *instance, void *sap, struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; struct tty_struct *tty; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); IRDA_ASSERT(skb != NULL, return -1;); tty = tty_port_tty_get(&self->port); if (!tty) { - IRDA_DEBUG(0, "%s(), no tty!\n", __func__ ); + pr_debug("%s(), no tty!\n", __func__); return 0; } @@ -1128,7 +1094,7 @@ static int ircomm_tty_data_indication(void *instance, void *sap, * params, we can just as well declare the hardware for running. */ if (tty->hw_stopped && (self->flow == FLOW_START)) { - IRDA_DEBUG(0, "%s(), polling for line settings!\n", __func__ ); + pr_debug("%s(), polling for line settings!\n", __func__); ircomm_param_request(self, IRCOMM_POLL, TRUE); /* We can just as well declare the hardware for running */ @@ -1161,8 +1127,6 @@ static int ircomm_tty_control_indication(void *instance, void *sap, struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; int clen; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); IRDA_ASSERT(skb != NULL, return -1;); @@ -1197,7 +1161,7 @@ static void ircomm_tty_flow_indication(void *instance, void *sap, switch (cmd) { case FLOW_START: - IRDA_DEBUG(2, "%s(), hw start!\n", __func__ ); + pr_debug("%s(), hw start!\n", __func__); if (tty) tty->hw_stopped = 0; @@ -1206,7 +1170,7 @@ static void ircomm_tty_flow_indication(void *instance, void *sap, break; default: /* If we get here, something is very wrong, better stop */ case FLOW_STOP: - IRDA_DEBUG(2, "%s(), hw stopped!\n", __func__ ); + pr_debug("%s(), hw stopped!\n", __func__); if (tty) tty->hw_stopped = 1; break; diff --git a/net/irda/ircomm/ircomm_tty_attach.c b/net/irda/ircomm/ircomm_tty_attach.c index 2ee87bf387cc..61137f8b5293 100644 --- a/net/irda/ircomm/ircomm_tty_attach.c +++ b/net/irda/ircomm/ircomm_tty_attach.c @@ -89,8 +89,7 @@ const char *const ircomm_tty_state[] = { "*** ERROR *** ", }; -#ifdef CONFIG_IRDA_DEBUG -static const char *const ircomm_tty_event[] = { +static const char *const ircomm_tty_event[] __maybe_unused = { "IRCOMM_TTY_ATTACH_CABLE", "IRCOMM_TTY_DETACH_CABLE", "IRCOMM_TTY_DATA_REQUEST", @@ -106,7 +105,6 @@ static const char *const ircomm_tty_event[] = { "IRCOMM_TTY_GOT_LSAPSEL", "*** ERROR ****", }; -#endif /* CONFIG_IRDA_DEBUG */ static int (*state[])(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, struct sk_buff *skb, struct ircomm_tty_info *info) = @@ -130,14 +128,12 @@ int ircomm_tty_attach_cable(struct ircomm_tty_cb *self) { struct 
tty_struct *tty; - IRDA_DEBUG(0, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); /* Check if somebody has already connected to us */ if (ircomm_is_connected(self->ircomm)) { - IRDA_DEBUG(0, "%s(), already connected!\n", __func__ ); + pr_debug("%s(), already connected!\n", __func__); return 0; } @@ -163,8 +159,6 @@ int ircomm_tty_attach_cable(struct ircomm_tty_cb *self) */ void ircomm_tty_detach_cable(struct ircomm_tty_cb *self) { - IRDA_DEBUG(0, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -212,8 +206,6 @@ static void ircomm_tty_ias_register(struct ircomm_tty_cb *self) __u8 oct_seq[6]; __u16 hints; - IRDA_DEBUG(0, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -313,17 +305,17 @@ int ircomm_tty_send_initial_parameters(struct ircomm_tty_cb *self) * Set default values, but only if the application for some reason * haven't set them already */ - IRDA_DEBUG(2, "%s(), data-rate = %d\n", __func__ , - self->settings.data_rate); + pr_debug("%s(), data-rate = %d\n", __func__ , + self->settings.data_rate); if (!self->settings.data_rate) self->settings.data_rate = 9600; - IRDA_DEBUG(2, "%s(), data-format = %d\n", __func__ , - self->settings.data_format); + pr_debug("%s(), data-format = %d\n", __func__ , + self->settings.data_format); if (!self->settings.data_format) self->settings.data_format = IRCOMM_WSIZE_8; /* 8N1 */ - IRDA_DEBUG(2, "%s(), flow-control = %d\n", __func__ , - self->settings.flow_control); + pr_debug("%s(), flow-control = %d\n", __func__ , + self->settings.flow_control); /*self->settings.flow_control = IRCOMM_RTS_CTS_IN|IRCOMM_RTS_CTS_OUT;*/ /* Do not set delta values for the initial parameters */ @@ -367,8 +359,6 @@ static void ircomm_tty_discovery_indication(discinfo_t *discovery, struct ircomm_tty_cb *self; struct ircomm_tty_info info; - IRDA_DEBUG(2, "%s()\n", __func__ ); - /* Important note : * We need to drop all passive discoveries. 
* The LSAP management of IrComm is deficient and doesn't deal @@ -404,8 +394,6 @@ void ircomm_tty_disconnect_indication(void *instance, void *sap, struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; struct tty_struct *tty; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -436,8 +424,6 @@ static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id, { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) priv; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -447,13 +433,13 @@ static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id, /* Check if request succeeded */ if (result != IAS_SUCCESS) { - IRDA_DEBUG(4, "%s(), got NULL value!\n", __func__ ); + pr_debug("%s(), got NULL value!\n", __func__); return; } switch (value->type) { case IAS_OCT_SEQ: - IRDA_DEBUG(2, "%s(), got octet sequence\n", __func__ ); + pr_debug("%s(), got octet sequence\n", __func__); irda_param_extract_all(self, value->t.oct_seq, value->len, &ircomm_param_info); @@ -463,21 +449,21 @@ static void ircomm_tty_getvalue_confirm(int result, __u16 obj_id, break; case IAS_INTEGER: /* Got LSAP selector */ - IRDA_DEBUG(2, "%s(), got lsapsel = %d\n", __func__ , - value->t.integer); + pr_debug("%s(), got lsapsel = %d\n", __func__ , + value->t.integer); if (value->t.integer == -1) { - IRDA_DEBUG(0, "%s(), invalid value!\n", __func__ ); + pr_debug("%s(), invalid value!\n", __func__); } else self->dlsap_sel = value->t.integer; ircomm_tty_do_event(self, IRCOMM_TTY_GOT_LSAPSEL, NULL, NULL); break; case IAS_MISSING: - IRDA_DEBUG(0, "%s(), got IAS_MISSING\n", __func__ ); + pr_debug("%s(), got IAS_MISSING\n", __func__); break; default: - IRDA_DEBUG(0, "%s(), got unknown type!\n", __func__ ); + pr_debug("%s(), got unknown type!\n", __func__); break; } irias_delete_value(value); @@ -497,8 +483,6 @@ void ircomm_tty_connect_confirm(void *instance, void *sap, { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -528,8 +512,6 @@ void ircomm_tty_connect_indication(void *instance, void *sap, struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance; int clen; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -559,8 +541,6 @@ void ircomm_tty_link_established(struct ircomm_tty_cb *self) { struct tty_struct *tty; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -578,10 +558,10 @@ void ircomm_tty_link_established(struct ircomm_tty_cb *self) */ if (tty_port_cts_enabled(&self->port) && ((self->settings.dce & IRCOMM_CTS) == 0)) { - IRDA_DEBUG(0, "%s(), waiting for CTS ...\n", __func__ ); + pr_debug("%s(), waiting for CTS ...\n", __func__); goto put; } else { - IRDA_DEBUG(1, "%s(), starting hardware!\n", __func__ ); + pr_debug("%s(), starting hardware!\n", __func__); tty->hw_stopped = 0; @@ -621,8 +601,6 @@ static void ircomm_tty_watchdog_timer_expired(void *data) { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) data; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); @@ -642,8 +620,8 @@ int ircomm_tty_do_event(struct ircomm_tty_cb *self, IRCOMM_TTY_EVENT event, 
IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return -1;); - IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ , - ircomm_tty_state[self->state], ircomm_tty_event[event]); + pr_debug("%s: state=%s, event=%s\n", __func__ , + ircomm_tty_state[self->state], ircomm_tty_event[event]); return (*state[self->state])(self, event, skb, info); } @@ -660,8 +638,8 @@ static inline void ircomm_tty_next_state(struct ircomm_tty_cb *self, IRCOMM_TTY_ IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;); - IRDA_DEBUG(2, "%s: next state=%s, service type=%d\n", __func__ , - ircomm_tty_state[self->state], self->service_type); + pr_debug("%s: next state=%s, service type=%d\n", __func__ , + ircomm_tty_state[self->state], self->service_type); */ self->state = state; } @@ -679,8 +657,8 @@ static int ircomm_tty_state_idle(struct ircomm_tty_cb *self, { int ret = 0; - IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ , - ircomm_tty_state[self->state], ircomm_tty_event[event]); + pr_debug("%s: state=%s, event=%s\n", __func__ , + ircomm_tty_state[self->state], ircomm_tty_event[event]); switch (event) { case IRCOMM_TTY_ATTACH_CABLE: /* Try to discover any remote devices */ @@ -694,8 +672,8 @@ static int ircomm_tty_state_idle(struct ircomm_tty_cb *self, self->saddr = info->saddr; if (self->iriap) { - IRDA_WARNING("%s(), busy with a previous query\n", - __func__); + net_warn_ratelimited("%s(), busy with a previous query\n", + __func__); return -EBUSY; } @@ -723,8 +701,8 @@ static int ircomm_tty_state_idle(struct ircomm_tty_cb *self, ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); break; default: - IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ , - ircomm_tty_event[event]); + pr_debug("%s(), unknown event: %s\n", __func__ , + ircomm_tty_event[event]); ret = -EINVAL; } return ret; @@ -743,8 +721,8 @@ static int ircomm_tty_state_search(struct ircomm_tty_cb *self, { int ret = 0; - IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ , - ircomm_tty_state[self->state], ircomm_tty_event[event]); + pr_debug("%s: state=%s, event=%s\n", __func__ , + ircomm_tty_state[self->state], ircomm_tty_event[event]); switch (event) { case IRCOMM_TTY_DISCOVERY_INDICATION: @@ -752,8 +730,8 @@ static int ircomm_tty_state_search(struct ircomm_tty_cb *self, self->saddr = info->saddr; if (self->iriap) { - IRDA_WARNING("%s(), busy with a previous query\n", - __func__); + net_warn_ratelimited("%s(), busy with a previous query\n", + __func__); return -EBUSY; } @@ -796,8 +774,8 @@ static int ircomm_tty_state_search(struct ircomm_tty_cb *self, ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); break; default: - IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ , - ircomm_tty_event[event]); + pr_debug("%s(), unknown event: %s\n", __func__ , + ircomm_tty_event[event]); ret = -EINVAL; } return ret; @@ -816,14 +794,14 @@ static int ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self, { int ret = 0; - IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ , - ircomm_tty_state[self->state], ircomm_tty_event[event]); + pr_debug("%s: state=%s, event=%s\n", __func__ , + ircomm_tty_state[self->state], ircomm_tty_event[event]); switch (event) { case IRCOMM_TTY_GOT_PARAMETERS: if (self->iriap) { - IRDA_WARNING("%s(), busy with a previous query\n", - __func__); + net_warn_ratelimited("%s(), busy with a previous query\n", + __func__); return -EBUSY; } @@ -854,8 +832,8 @@ static int ircomm_tty_state_query_parameters(struct ircomm_tty_cb *self, ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); 
break; default: - IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ , - ircomm_tty_event[event]); + pr_debug("%s(), unknown event: %s\n", __func__ , + ircomm_tty_event[event]); ret = -EINVAL; } return ret; @@ -874,8 +852,8 @@ static int ircomm_tty_state_query_lsap_sel(struct ircomm_tty_cb *self, { int ret = 0; - IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ , - ircomm_tty_state[self->state], ircomm_tty_event[event]); + pr_debug("%s: state=%s, event=%s\n", __func__ , + ircomm_tty_state[self->state], ircomm_tty_event[event]); switch (event) { case IRCOMM_TTY_GOT_LSAPSEL: @@ -903,8 +881,8 @@ static int ircomm_tty_state_query_lsap_sel(struct ircomm_tty_cb *self, ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); break; default: - IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ , - ircomm_tty_event[event]); + pr_debug("%s(), unknown event: %s\n", __func__ , + ircomm_tty_event[event]); ret = -EINVAL; } return ret; @@ -923,8 +901,8 @@ static int ircomm_tty_state_setup(struct ircomm_tty_cb *self, { int ret = 0; - IRDA_DEBUG(2, "%s: state=%s, event=%s\n", __func__ , - ircomm_tty_state[self->state], ircomm_tty_event[event]); + pr_debug("%s: state=%s, event=%s\n", __func__ , + ircomm_tty_state[self->state], ircomm_tty_event[event]); switch (event) { case IRCOMM_TTY_CONNECT_CONFIRM: @@ -957,8 +935,8 @@ static int ircomm_tty_state_setup(struct ircomm_tty_cb *self, ircomm_tty_next_state(self, IRCOMM_TTY_IDLE); break; default: - IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ , - ircomm_tty_event[event]); + pr_debug("%s(), unknown event: %s\n", __func__ , + ircomm_tty_event[event]); ret = -EINVAL; } return ret; @@ -995,13 +973,13 @@ static int ircomm_tty_state_ready(struct ircomm_tty_cb *self, self->settings.dce = IRCOMM_DELTA_CD; ircomm_tty_check_modem_status(self); } else { - IRDA_DEBUG(0, "%s(), hanging up!\n", __func__ ); + pr_debug("%s(), hanging up!\n", __func__); tty_port_tty_hangup(&self->port, false); } break; default: - IRDA_DEBUG(2, "%s(), unknown event: %s\n", __func__ , - ircomm_tty_event[event]); + pr_debug("%s(), unknown event: %s\n", __func__ , + ircomm_tty_event[event]); ret = -EINVAL; } return ret; diff --git a/net/irda/ircomm/ircomm_tty_ioctl.c b/net/irda/ircomm/ircomm_tty_ioctl.c index ce943853c38d..75ccdbd0728e 100644 --- a/net/irda/ircomm/ircomm_tty_ioctl.c +++ b/net/irda/ircomm/ircomm_tty_ioctl.c @@ -56,8 +56,6 @@ static void ircomm_tty_change_speed(struct ircomm_tty_cb *self, unsigned int cflag, cval; int baud; - IRDA_DEBUG(2, "%s()\n", __func__ ); - if (!self->ircomm) return; @@ -93,7 +91,8 @@ static void ircomm_tty_change_speed(struct ircomm_tty_cb *self, self->settings.flow_control |= IRCOMM_RTS_CTS_IN; /* This got me. Bummer. 
Jean II */ if (self->service_type == IRCOMM_3_WIRE_RAW) - IRDA_WARNING("%s(), enabling RTS/CTS on link that doesn't support it (3-wire-raw)\n", __func__); + net_warn_ratelimited("%s(), enabling RTS/CTS on link that doesn't support it (3-wire-raw)\n", + __func__); } else { self->port.flags &= ~ASYNC_CTS_FLOW; self->settings.flow_control &= ~IRCOMM_RTS_CTS_IN; @@ -149,8 +148,6 @@ void ircomm_tty_set_termios(struct tty_struct *tty, struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; unsigned int cflag = tty->termios.c_cflag; - IRDA_DEBUG(2, "%s()\n", __func__ ); - if ((cflag == old_termios->c_cflag) && (RELEVANT_IFLAG(tty->termios.c_iflag) == RELEVANT_IFLAG(old_termios->c_iflag))) @@ -198,8 +195,6 @@ int ircomm_tty_tiocmget(struct tty_struct *tty) struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; unsigned int result; - IRDA_DEBUG(2, "%s()\n", __func__ ); - if (tty->flags & (1 << TTY_IO_ERROR)) return -EIO; @@ -223,8 +218,6 @@ int ircomm_tty_tiocmset(struct tty_struct *tty, { struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; - IRDA_DEBUG(2, "%s()\n", __func__ ); - if (tty->flags & (1 << TTY_IO_ERROR)) return -EIO; @@ -265,8 +258,6 @@ static int ircomm_tty_get_serial_info(struct ircomm_tty_cb *self, if (!retinfo) return -EFAULT; - IRDA_DEBUG(2, "%s()\n", __func__ ); - memset(&info, 0, sizeof(info)); info.line = self->line; info.flags = self->port.flags; @@ -301,8 +292,6 @@ static int ircomm_tty_set_serial_info(struct ircomm_tty_cb *self, struct serial_struct new_serial; struct ircomm_tty_cb old_state, *state; - IRDA_DEBUG(0, "%s()\n", __func__ ); - if (copy_from_user(&new_serial,new_info,sizeof(new_serial))) return -EFAULT; @@ -375,8 +364,6 @@ int ircomm_tty_ioctl(struct tty_struct *tty, struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) tty->driver_data; int ret = 0; - IRDA_DEBUG(2, "%s()\n", __func__ ); - if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGSTRUCT) && (cmd != TIOCMIWAIT) && (cmd != TIOCGICOUNT)) { @@ -392,11 +379,11 @@ int ircomm_tty_ioctl(struct tty_struct *tty, ret = ircomm_tty_set_serial_info(self, (struct serial_struct __user *) arg); break; case TIOCMIWAIT: - IRDA_DEBUG(0, "(), TIOCMIWAIT, not impl!\n"); + pr_debug("(), TIOCMIWAIT, not impl!\n"); break; case TIOCGICOUNT: - IRDA_DEBUG(0, "%s(), TIOCGICOUNT not impl!\n", __func__ ); + pr_debug("%s(), TIOCGICOUNT not impl!\n", __func__); #if 0 save_flags(flags); cli(); cnow = driver->icount; diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c index 9e0d909390fd..856736656a30 100644 --- a/net/irda/irda_device.c +++ b/net/irda/irda_device.c @@ -63,14 +63,14 @@ int __init irda_device_init( void) { dongles = hashbin_new(HB_NOLOCK); if (dongles == NULL) { - IRDA_WARNING("IrDA: Can't allocate dongles hashbin!\n"); + net_warn_ratelimited("IrDA: Can't allocate dongles hashbin!\n"); return -ENOMEM; } spin_lock_init(&dongles->hb_spinlock); tasks = hashbin_new(HB_LOCK); if (tasks == NULL) { - IRDA_WARNING("IrDA: Can't allocate tasks hashbin!\n"); + net_warn_ratelimited("IrDA: Can't allocate tasks hashbin!\n"); hashbin_delete(dongles, NULL); return -ENOMEM; } @@ -84,14 +84,12 @@ int __init irda_device_init( void) static void leftover_dongle(void *arg) { struct dongle_reg *reg = arg; - IRDA_WARNING("IrDA: Dongle type %x not unregistered\n", - reg->type); + net_warn_ratelimited("IrDA: Dongle type %x not unregistered\n", + reg->type); } void irda_device_cleanup(void) { - IRDA_DEBUG(4, "%s()\n", __func__); - 
hashbin_delete(tasks, (FREE_FUNC) __irda_task_delete); hashbin_delete(dongles, leftover_dongle); @@ -107,7 +105,7 @@ void irda_device_set_media_busy(struct net_device *dev, int status) { struct irlap_cb *self; - IRDA_DEBUG(4, "%s(%s)\n", __func__, status ? "TRUE" : "FALSE"); + pr_debug("%s(%s)\n", __func__, status ? "TRUE" : "FALSE"); self = (struct irlap_cb *) dev->atalk_ptr; @@ -127,7 +125,7 @@ void irda_device_set_media_busy(struct net_device *dev, int status) irlap_start_mbusy_timer(self, SMALLBUSY_TIMEOUT); else irlap_start_mbusy_timer(self, MEDIABUSY_TIMEOUT); - IRDA_DEBUG( 4, "Media busy!\n"); + pr_debug("Media busy!\n"); } else { self->media_busy = FALSE; irlap_stop_mbusy_timer(self); @@ -147,11 +145,9 @@ int irda_device_is_receiving(struct net_device *dev) struct if_irda_req req; int ret; - IRDA_DEBUG(2, "%s()\n", __func__); - if (!dev->netdev_ops->ndo_do_ioctl) { - IRDA_ERROR("%s: do_ioctl not impl. by device driver\n", - __func__); + net_err_ratelimited("%s: do_ioctl not impl. by device driver\n", + __func__); return -1; } @@ -192,8 +188,6 @@ static int irda_task_kick(struct irda_task *task) int count = 0; int timeout; - IRDA_DEBUG(2, "%s()\n", __func__); - IRDA_ASSERT(task != NULL, return -1;); IRDA_ASSERT(task->magic == IRDA_TASK_MAGIC, return -1;); @@ -201,15 +195,15 @@ static int irda_task_kick(struct irda_task *task) do { timeout = task->function(task); if (count++ > 100) { - IRDA_ERROR("%s: error in task handler!\n", - __func__); + net_err_ratelimited("%s: error in task handler!\n", + __func__); irda_task_delete(task); return TRUE; } } while ((timeout == 0) && (task->state != IRDA_TASK_DONE)); if (timeout < 0) { - IRDA_ERROR("%s: Error executing task!\n", __func__); + net_err_ratelimited("%s: Error executing task!\n", __func__); irda_task_delete(task); return TRUE; } @@ -241,8 +235,8 @@ static int irda_task_kick(struct irda_task *task) irda_task_timer_expired); finished = FALSE; } else { - IRDA_DEBUG(0, "%s(), not finished, and no timeout!\n", - __func__); + pr_debug("%s(), not finished, and no timeout!\n", + __func__); finished = FALSE; } @@ -259,8 +253,6 @@ static void irda_task_timer_expired(void *data) { struct irda_task *task; - IRDA_DEBUG(2, "%s()\n", __func__); - task = data; irda_task_kick(task); diff --git a/net/irda/iriap.c b/net/irda/iriap.c index e1b37f5a2691..4a7ae32afa09 100644 --- a/net/irda/iriap.c +++ b/net/irda/iriap.c @@ -43,9 +43,8 @@ #include <net/irda/iriap_event.h> #include <net/irda/iriap.h> -#ifdef CONFIG_IRDA_DEBUG /* FIXME: This one should go in irlmp.c */ -static const char *const ias_charset_types[] = { +static const char *const ias_charset_types[] __maybe_unused = { "CS_ASCII", "CS_ISO_8859_1", "CS_ISO_8859_2", @@ -58,7 +57,6 @@ static const char *const ias_charset_types[] = { "CS_ISO_8859_9", "CS_UNICODE" }; -#endif /* CONFIG_IRDA_DEBUG */ static hashbin_t *iriap = NULL; static void *service_handle; @@ -110,8 +108,8 @@ int __init iriap_init(void) /* Object repository - defined in irias_object.c */ irias_objects = hashbin_new(HB_LOCK); if (!irias_objects) { - IRDA_WARNING("%s: Can't allocate irias_objects hashbin!\n", - __func__); + net_warn_ratelimited("%s: Can't allocate irias_objects hashbin!\n", + __func__); hashbin_delete(iriap, NULL); return -ENOMEM; } @@ -145,7 +143,7 @@ int __init iriap_init(void) */ server = iriap_open(LSAP_IAS, IAS_SERVER, NULL, NULL); if (!server) { - IRDA_DEBUG(0, "%s(), unable to open server\n", __func__); + pr_debug("%s(), unable to open server\n", __func__); return -1; } iriap_register_lsap(server, LSAP_IAS, 
IAS_SERVER); @@ -177,13 +175,9 @@ struct iriap_cb *iriap_open(__u8 slsap_sel, int mode, void *priv, { struct iriap_cb *self; - IRDA_DEBUG(2, "%s()\n", __func__); - self = kzalloc(sizeof(*self), GFP_ATOMIC); - if (!self) { - IRDA_WARNING("%s: Unable to kmalloc!\n", __func__); + if (!self) return NULL; - } /* * Initialize instance @@ -223,8 +217,6 @@ EXPORT_SYMBOL(iriap_open); */ static void __iriap_close(struct iriap_cb *self) { - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IAS_MAGIC, return;); @@ -247,8 +239,6 @@ void iriap_close(struct iriap_cb *self) { struct iriap_cb *entry; - IRDA_DEBUG(2, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IAS_MAGIC, return;); @@ -268,8 +258,6 @@ static int iriap_register_lsap(struct iriap_cb *self, __u8 slsap_sel, int mode) { notify_t notify; - IRDA_DEBUG(2, "%s()\n", __func__); - irda_notify_init(¬ify); notify.connect_confirm = iriap_connect_confirm; notify.connect_indication = iriap_connect_indication; @@ -283,7 +271,8 @@ static int iriap_register_lsap(struct iriap_cb *self, __u8 slsap_sel, int mode) self->lsap = irlmp_open_lsap(slsap_sel, ¬ify, 0); if (self->lsap == NULL) { - IRDA_ERROR("%s: Unable to allocated LSAP!\n", __func__); + net_err_ratelimited("%s: Unable to allocated LSAP!\n", + __func__); return -1; } self->slsap_sel = self->lsap->slsap_sel; @@ -303,8 +292,8 @@ static void iriap_disconnect_indication(void *instance, void *sap, { struct iriap_cb *self; - IRDA_DEBUG(4, "%s(), reason=%s [%d]\n", __func__, - irlmp_reason_str(reason), reason); + pr_debug("%s(), reason=%s [%d]\n", __func__, + irlmp_reason_str(reason), reason); self = instance; @@ -320,7 +309,7 @@ static void iriap_disconnect_indication(void *instance, void *sap, dev_kfree_skb(skb); if (self->mode == IAS_CLIENT) { - IRDA_DEBUG(4, "%s(), disconnect as client\n", __func__); + pr_debug("%s(), disconnect as client\n", __func__); iriap_do_client_event(self, IAP_LM_DISCONNECT_INDICATION, @@ -333,7 +322,7 @@ static void iriap_disconnect_indication(void *instance, void *sap, if (self->confirm) self->confirm(IAS_DISCONNECT, 0, NULL, self->priv); } else { - IRDA_DEBUG(4, "%s(), disconnect as server\n", __func__); + pr_debug("%s(), disconnect as server\n", __func__); iriap_do_server_event(self, IAP_LM_DISCONNECT_INDICATION, NULL); iriap_close(self); @@ -347,16 +336,13 @@ static void iriap_disconnect_request(struct iriap_cb *self) { struct sk_buff *tx_skb; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IAS_MAGIC, return;); tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC); if (tx_skb == NULL) { - IRDA_DEBUG(0, - "%s(), Could not allocate an sk_buff of length %d\n", - __func__, LMP_MAX_HEADER); + pr_debug("%s(), Could not allocate an sk_buff of length %d\n", + __func__, LMP_MAX_HEADER); return; } @@ -461,14 +447,14 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self, len = get_unaligned_be16(fp + n); n += 2; - IRDA_DEBUG(4, "%s(), len=%d\n", __func__, len); + pr_debug("%s(), len=%d\n", __func__, len); /* Get object ID, MSB first */ obj_id = get_unaligned_be16(fp + n); n += 2; type = fp[n++]; - IRDA_DEBUG(4, "%s(), Value type = %d\n", __func__, type); + pr_debug("%s(), Value type = %d\n", __func__, type); switch (type) { case IAS_INTEGER: @@ -477,7 +463,7 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self, value = irias_new_integer_value(tmp_cpu32); /* Legal values restricted to 0x01-0x6f, page 15 irttp */ - 
IRDA_DEBUG(4, "%s(), lsap=%d\n", __func__, value->t.integer); + pr_debug("%s(), lsap=%d\n", __func__, value->t.integer); break; case IAS_STRING: charset = fp[n++]; @@ -496,11 +482,11 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self, /* case CS_ISO_8859_9: */ /* case CS_UNICODE: */ default: - IRDA_DEBUG(0, "%s(), charset [%d] %s, not supported\n", - __func__, charset, - charset < ARRAY_SIZE(ias_charset_types) ? - ias_charset_types[charset] : - "(unknown)"); + pr_debug("%s(), charset [%d] %s, not supported\n", + __func__, charset, + charset < ARRAY_SIZE(ias_charset_types) ? + ias_charset_types[charset] : + "(unknown)"); /* Aborting, close connection! */ iriap_disconnect_request(self); @@ -508,12 +494,12 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self, /* break; */ } value_len = fp[n++]; - IRDA_DEBUG(4, "%s(), strlen=%d\n", __func__, value_len); + pr_debug("%s(), strlen=%d\n", __func__, value_len); /* Make sure the string is null-terminated */ if (n + value_len < skb->len) fp[n + value_len] = 0x00; - IRDA_DEBUG(4, "Got string %s\n", fp+n); + pr_debug("Got string %s\n", fp+n); /* Will truncate to IAS_MAX_STRING bytes */ value = irias_new_string_value(fp+n); @@ -539,7 +525,7 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self, if (self->confirm) self->confirm(IAS_SUCCESS, obj_id, value, self->priv); else { - IRDA_DEBUG(0, "%s(), missing handler!\n", __func__); + pr_debug("%s(), missing handler!\n", __func__); irias_delete_value(value); } } @@ -561,8 +547,6 @@ static void iriap_getvaluebyclass_response(struct iriap_cb *self, __be16 tmp_be16; __u8 *fp; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IAS_MAGIC, return;); IRDA_ASSERT(value != NULL, return;); @@ -623,12 +607,12 @@ static void iriap_getvaluebyclass_response(struct iriap_cb *self, memcpy(fp+n, value->t.oct_seq, value->len); n+=value->len; break; case IAS_MISSING: - IRDA_DEBUG( 3, "%s: sending IAS_MISSING\n", __func__); + pr_debug("%s: sending IAS_MISSING\n", __func__); skb_put(tx_skb, 1); fp[n++] = value->type; break; default: - IRDA_DEBUG(0, "%s(), type not implemented!\n", __func__); + pr_debug("%s(), type not implemented!\n", __func__); break; } iriap_do_r_connect_event(self, IAP_CALL_RESPONSE, tx_skb); @@ -655,8 +639,6 @@ static void iriap_getvaluebyclass_indication(struct iriap_cb *self, __u8 *fp; int n; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IAS_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); @@ -678,20 +660,20 @@ static void iriap_getvaluebyclass_indication(struct iriap_cb *self, memcpy(attr, fp+n, attr_len); n+=attr_len; attr[attr_len] = '\0'; - IRDA_DEBUG(4, "LM-IAS: Looking up %s: %s\n", name, attr); + pr_debug("LM-IAS: Looking up %s: %s\n", name, attr); obj = irias_find_object(name); if (obj == NULL) { - IRDA_DEBUG(2, "LM-IAS: Object %s not found\n", name); + pr_debug("LM-IAS: Object %s not found\n", name); iriap_getvaluebyclass_response(self, 0x1235, IAS_CLASS_UNKNOWN, &irias_missing); return; } - IRDA_DEBUG(4, "LM-IAS: found %s, id=%d\n", obj->name, obj->id); + pr_debug("LM-IAS: found %s, id=%d\n", obj->name, obj->id); attrib = irias_find_attrib(obj, attr); if (attrib == NULL) { - IRDA_DEBUG(2, "LM-IAS: Attribute %s not found\n", attr); + pr_debug("LM-IAS: Attribute %s not found\n", attr); iriap_getvaluebyclass_response(self, obj->id, IAS_ATTRIB_UNKNOWN, &irias_missing); @@ -714,8 +696,6 @@ void iriap_send_ack(struct iriap_cb *self) struct sk_buff 
*tx_skb; __u8 *frame; - IRDA_DEBUG(2, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IAS_MAGIC, return;); @@ -745,7 +725,7 @@ void iriap_connect_request(struct iriap_cb *self) self->saddr, self->daddr, NULL, NULL); if (ret < 0) { - IRDA_DEBUG(0, "%s(), connect failed!\n", __func__); + pr_debug("%s(), connect failed!\n", __func__); self->confirm(IAS_DISCONNECT, 0, NULL, self->priv); } } @@ -793,8 +773,6 @@ static void iriap_connect_indication(void *instance, void *sap, { struct iriap_cb *self, *new; - IRDA_DEBUG(1, "%s()\n", __func__); - self = instance; IRDA_ASSERT(skb != NULL, return;); @@ -804,14 +782,14 @@ static void iriap_connect_indication(void *instance, void *sap, /* Start new server */ new = iriap_open(LSAP_IAS, IAS_SERVER, NULL, NULL); if (!new) { - IRDA_DEBUG(0, "%s(), open failed\n", __func__); + pr_debug("%s(), open failed\n", __func__); goto out; } /* Now attach up the new "socket" */ new->lsap = irlmp_dup(self->lsap, new); if (!new->lsap) { - IRDA_DEBUG(0, "%s(), dup failed!\n", __func__); + pr_debug("%s(), dup failed!\n", __func__); goto out; } @@ -841,8 +819,6 @@ static int iriap_data_indication(void *instance, void *sap, __u8 *frame; __u8 opcode; - IRDA_DEBUG(3, "%s()\n", __func__); - self = instance; IRDA_ASSERT(skb != NULL, return 0;); @@ -853,21 +829,20 @@ static int iriap_data_indication(void *instance, void *sap, if (self->mode == IAS_SERVER) { /* Call server */ - IRDA_DEBUG(4, "%s(), Calling server!\n", __func__); + pr_debug("%s(), Calling server!\n", __func__); iriap_do_r_connect_event(self, IAP_RECV_F_LST, skb); goto out; } opcode = frame[0]; if (~opcode & IAP_LST) { - IRDA_WARNING("%s:, IrIAS multiframe commands or " - "results is not implemented yet!\n", - __func__); + net_warn_ratelimited("%s:, IrIAS multiframe commands or results is not implemented yet!\n", + __func__); goto out; } /* Check for ack frames since they don't contain any data */ if (opcode & IAP_ACK) { - IRDA_DEBUG(0, "%s() Got ack frame!\n", __func__); + pr_debug("%s() Got ack frame!\n", __func__); goto out; } @@ -875,7 +850,7 @@ static int iriap_data_indication(void *instance, void *sap, switch (opcode) { case GET_INFO_BASE: - IRDA_DEBUG(0, "IrLMP GetInfoBaseDetails not implemented!\n"); + pr_debug("IrLMP GetInfoBaseDetails not implemented!\n"); break; case GET_VALUE_BY_CLASS: iriap_do_call_event(self, IAP_RECV_F_LST, NULL); @@ -885,7 +860,7 @@ static int iriap_data_indication(void *instance, void *sap, iriap_getvaluebyclass_confirm(self, skb); break; case IAS_CLASS_UNKNOWN: - IRDA_DEBUG(1, "%s(), No such class!\n", __func__); + pr_debug("%s(), No such class!\n", __func__); /* Finished, close connection! */ iriap_disconnect_request(self); @@ -898,7 +873,7 @@ static int iriap_data_indication(void *instance, void *sap, self->priv); break; case IAS_ATTRIB_UNKNOWN: - IRDA_DEBUG(1, "%s(), No such attribute!\n", __func__); + pr_debug("%s(), No such attribute!\n", __func__); /* Finished, close connection! 
*/ iriap_disconnect_request(self); @@ -913,8 +888,8 @@ static int iriap_data_indication(void *instance, void *sap, } break; default: - IRDA_DEBUG(0, "%s(), Unknown op-code: %02x\n", __func__, - opcode); + pr_debug("%s(), Unknown op-code: %02x\n", __func__, + opcode); break; } @@ -935,8 +910,6 @@ void iriap_call_indication(struct iriap_cb *self, struct sk_buff *skb) __u8 *fp; __u8 opcode; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IAS_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); @@ -945,16 +918,16 @@ void iriap_call_indication(struct iriap_cb *self, struct sk_buff *skb) opcode = fp[0]; if (~opcode & 0x80) { - IRDA_WARNING("%s: IrIAS multiframe commands or results " - "is not implemented yet!\n", __func__); + net_warn_ratelimited("%s: IrIAS multiframe commands or results is not implemented yet!\n", + __func__); return; } opcode &= 0x7f; /* Mask away LST bit */ switch (opcode) { case GET_INFO_BASE: - IRDA_WARNING("%s: GetInfoBaseDetails not implemented yet!\n", - __func__); + net_warn_ratelimited("%s: GetInfoBaseDetails not implemented yet!\n", + __func__); break; case GET_VALUE_BY_CLASS: iriap_getvaluebyclass_indication(self, skb); diff --git a/net/irda/iriap_event.c b/net/irda/iriap_event.c index 703774e29e32..e6098b2e048a 100644 --- a/net/irda/iriap_event.c +++ b/net/irda/iriap_event.c @@ -187,7 +187,7 @@ static void state_s_disconnect(struct iriap_cb *self, IRIAP_EVENT event, case IAP_LM_DISCONNECT_INDICATION: break; default: - IRDA_DEBUG(0, "%s(), Unknown event %d\n", __func__, event); + pr_debug("%s(), Unknown event %d\n", __func__, event); break; } } @@ -219,7 +219,7 @@ static void state_s_connecting(struct iriap_cb *self, IRIAP_EVENT event, iriap_next_client_state(self, S_DISCONNECT); break; default: - IRDA_DEBUG(0, "%s(), Unknown event %d\n", __func__, event); + pr_debug("%s(), Unknown event %d\n", __func__, event); break; } } @@ -243,7 +243,7 @@ static void state_s_call(struct iriap_cb *self, IRIAP_EVENT event, iriap_next_client_state(self, S_DISCONNECT); break; default: - IRDA_DEBUG(0, "state_s_call: Unknown event %d\n", event); + pr_debug("state_s_call: Unknown event %d\n", event); break; } } @@ -271,7 +271,7 @@ static void state_s_make_call(struct iriap_cb *self, IRIAP_EVENT event, iriap_next_call_state(self, S_OUTSTANDING); break; default: - IRDA_DEBUG(0, "%s(), Unknown event %d\n", __func__, event); + pr_debug("%s(), Unknown event %d\n", __func__, event); break; } } @@ -285,7 +285,7 @@ static void state_s_make_call(struct iriap_cb *self, IRIAP_EVENT event, static void state_s_calling(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(0, "%s(), Not implemented\n", __func__); + pr_debug("%s(), Not implemented\n", __func__); } /* @@ -307,7 +307,7 @@ static void state_s_outstanding(struct iriap_cb *self, IRIAP_EVENT event, iriap_next_call_state(self, S_WAIT_FOR_CALL); break; default: - IRDA_DEBUG(0, "%s(), Unknown event %d\n", __func__, event); + pr_debug("%s(), Unknown event %d\n", __func__, event); break; } } @@ -320,7 +320,7 @@ static void state_s_outstanding(struct iriap_cb *self, IRIAP_EVENT event, static void state_s_replying(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(0, "%s(), Not implemented\n", __func__); + pr_debug("%s(), Not implemented\n", __func__); } /* @@ -332,7 +332,7 @@ static void state_s_replying(struct iriap_cb *self, IRIAP_EVENT event, static void state_s_wait_for_call(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) { - 
IRDA_DEBUG(0, "%s(), Not implemented\n", __func__); + pr_debug("%s(), Not implemented\n", __func__); } @@ -345,7 +345,7 @@ static void state_s_wait_for_call(struct iriap_cb *self, IRIAP_EVENT event, static void state_s_wait_active(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(0, "%s(), Not implemented\n", __func__); + pr_debug("%s(), Not implemented\n", __func__); } /************************************************************************** @@ -368,10 +368,8 @@ static void state_r_disconnect(struct iriap_cb *self, IRIAP_EVENT event, switch (event) { case IAP_LM_CONNECT_INDICATION: tx_skb = alloc_skb(LMP_MAX_HEADER, GFP_ATOMIC); - if (tx_skb == NULL) { - IRDA_WARNING("%s: unable to malloc!\n", __func__); + if (tx_skb == NULL) return; - } /* Reserve space for MUX_CONTROL and LAP header */ skb_reserve(tx_skb, LMP_MAX_HEADER); @@ -388,7 +386,7 @@ static void state_r_disconnect(struct iriap_cb *self, IRIAP_EVENT event, iriap_next_r_connect_state(self, R_RECEIVING); break; default: - IRDA_DEBUG(0, "%s(), unknown event %d\n", __func__, event); + pr_debug("%s(), unknown event %d\n", __func__, event); break; } } @@ -399,8 +397,6 @@ static void state_r_disconnect(struct iriap_cb *self, IRIAP_EVENT event, static void state_r_call(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__); - switch (event) { case IAP_LM_DISCONNECT_INDICATION: /* Abort call */ @@ -408,7 +404,7 @@ static void state_r_call(struct iriap_cb *self, IRIAP_EVENT event, iriap_next_r_connect_state(self, R_WAITING); break; default: - IRDA_DEBUG(0, "%s(), unknown event!\n", __func__); + pr_debug("%s(), unknown event!\n", __func__); break; } } @@ -423,13 +419,13 @@ static void state_r_call(struct iriap_cb *self, IRIAP_EVENT event, static void state_r_waiting(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(0, "%s(), Not implemented\n", __func__); + pr_debug("%s(), Not implemented\n", __func__); } static void state_r_wait_active(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(0, "%s(), Not implemented\n", __func__); + pr_debug("%s(), Not implemented\n", __func__); } /* @@ -441,8 +437,6 @@ static void state_r_wait_active(struct iriap_cb *self, IRIAP_EVENT event, static void state_r_receiving(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__); - switch (event) { case IAP_RECV_F_LST: iriap_next_r_connect_state(self, R_EXECUTE); @@ -450,7 +444,7 @@ static void state_r_receiving(struct iriap_cb *self, IRIAP_EVENT event, iriap_call_indication(self, skb); break; default: - IRDA_DEBUG(0, "%s(), unknown event!\n", __func__); + pr_debug("%s(), unknown event!\n", __func__); break; } } @@ -464,8 +458,6 @@ static void state_r_receiving(struct iriap_cb *self, IRIAP_EVENT event, static void state_r_execute(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(skb != NULL, return;); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IAS_MAGIC, return;); @@ -485,7 +477,7 @@ static void state_r_execute(struct iriap_cb *self, IRIAP_EVENT event, irlmp_data_request(self->lsap, skb); break; default: - IRDA_DEBUG(0, "%s(), unknown event!\n", __func__); + pr_debug("%s(), unknown event!\n", __func__); break; } } @@ -493,7 +485,7 @@ static void state_r_execute(struct iriap_cb *self, IRIAP_EVENT event, static void state_r_returning(struct iriap_cb *self, IRIAP_EVENT event, struct sk_buff *skb) { - 
IRDA_DEBUG(0, "%s(), event=%d\n", __func__, event); + pr_debug("%s(), event=%d\n", __func__, event); switch (event) { case IAP_RECV_F_LST: diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c index f07ed9fd5792..53b86d0e1630 100644 --- a/net/irda/irias_object.c +++ b/net/irda/irias_object.c @@ -48,20 +48,18 @@ struct ias_object *irias_new_object( char *name, int id) { struct ias_object *obj; - IRDA_DEBUG( 4, "%s()\n", __func__); - obj = kzalloc(sizeof(struct ias_object), GFP_ATOMIC); if (obj == NULL) { - IRDA_WARNING("%s(), Unable to allocate object!\n", - __func__); + net_warn_ratelimited("%s(), Unable to allocate object!\n", + __func__); return NULL; } obj->magic = IAS_OBJECT_MAGIC; obj->name = kstrndup(name, IAS_MAX_CLASSNAME, GFP_ATOMIC); if (!obj->name) { - IRDA_WARNING("%s(), Unable to allocate name!\n", - __func__); + net_warn_ratelimited("%s(), Unable to allocate name!\n", + __func__); kfree(obj); return NULL; } @@ -73,8 +71,8 @@ struct ias_object *irias_new_object( char *name, int id) obj->attribs = hashbin_new(HB_LOCK); if (obj->attribs == NULL) { - IRDA_WARNING("%s(), Unable to allocate attribs!\n", - __func__); + net_warn_ratelimited("%s(), Unable to allocate attribs!\n", + __func__); kfree(obj->name); kfree(obj); return NULL; @@ -134,8 +132,8 @@ int irias_delete_object(struct ias_object *obj) /* Remove from list */ node = hashbin_remove_this(irias_objects, (irda_queue_t *) obj); if (!node) - IRDA_DEBUG( 0, "%s(), object already removed!\n", - __func__); + pr_debug("%s(), object already removed!\n", + __func__); /* Destroy */ __irias_delete_object(obj); @@ -269,8 +267,8 @@ int irias_object_change_attribute(char *obj_name, char *attrib_name, /* Find object */ obj = hashbin_lock_find(irias_objects, 0, obj_name); if (obj == NULL) { - IRDA_WARNING("%s: Unable to find object: %s\n", __func__, - obj_name); + net_warn_ratelimited("%s: Unable to find object: %s\n", + __func__, obj_name); return -1; } @@ -280,15 +278,15 @@ int irias_object_change_attribute(char *obj_name, char *attrib_name, /* Find attribute */ attrib = hashbin_find(obj->attribs, 0, attrib_name); if (attrib == NULL) { - IRDA_WARNING("%s: Unable to find attribute: %s\n", - __func__, attrib_name); + net_warn_ratelimited("%s: Unable to find attribute: %s\n", + __func__, attrib_name); spin_unlock_irqrestore(&obj->attribs->hb_spinlock, flags); return -1; } if ( attrib->value->type != new_value->type) { - IRDA_DEBUG( 0, "%s(), changing value type not allowed!\n", - __func__); + pr_debug("%s(), changing value type not allowed!\n", + __func__); spin_unlock_irqrestore(&obj->attribs->hb_spinlock, flags); return -1; } @@ -322,8 +320,8 @@ void irias_add_integer_attrib(struct ias_object *obj, char *name, int value, attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC); if (attrib == NULL) { - IRDA_WARNING("%s: Unable to allocate attribute!\n", - __func__); + net_warn_ratelimited("%s: Unable to allocate attribute!\n", + __func__); return; } @@ -333,8 +331,8 @@ void irias_add_integer_attrib(struct ias_object *obj, char *name, int value, /* Insert value */ attrib->value = irias_new_integer_value(value); if (!attrib->name || !attrib->value) { - IRDA_WARNING("%s: Unable to allocate attribute!\n", - __func__); + net_warn_ratelimited("%s: Unable to allocate attribute!\n", + __func__); if (attrib->value) irias_delete_value(attrib->value); kfree(attrib->name); @@ -366,8 +364,8 @@ void irias_add_octseq_attrib(struct ias_object *obj, char *name, __u8 *octets, attrib = kzalloc(sizeof(struct ias_attrib), GFP_ATOMIC); if (attrib == 
NULL) { - IRDA_WARNING("%s: Unable to allocate attribute!\n", - __func__); + net_warn_ratelimited("%s: Unable to allocate attribute!\n", + __func__); return; } @@ -376,8 +374,8 @@ void irias_add_octseq_attrib(struct ias_object *obj, char *name, __u8 *octets, attrib->value = irias_new_octseq_value( octets, len); if (!attrib->name || !attrib->value) { - IRDA_WARNING("%s: Unable to allocate attribute!\n", - __func__); + net_warn_ratelimited("%s: Unable to allocate attribute!\n", + __func__); if (attrib->value) irias_delete_value(attrib->value); kfree(attrib->name); @@ -408,8 +406,8 @@ void irias_add_string_attrib(struct ias_object *obj, char *name, char *value, attrib = kzalloc(sizeof( struct ias_attrib), GFP_ATOMIC); if (attrib == NULL) { - IRDA_WARNING("%s: Unable to allocate attribute!\n", - __func__); + net_warn_ratelimited("%s: Unable to allocate attribute!\n", + __func__); return; } @@ -418,8 +416,8 @@ void irias_add_string_attrib(struct ias_object *obj, char *name, char *value, attrib->value = irias_new_string_value(value); if (!attrib->name || !attrib->value) { - IRDA_WARNING("%s: Unable to allocate attribute!\n", - __func__); + net_warn_ratelimited("%s: Unable to allocate attribute!\n", + __func__); if (attrib->value) irias_delete_value(attrib->value); kfree(attrib->name); @@ -442,10 +440,8 @@ struct ias_value *irias_new_integer_value(int integer) struct ias_value *value; value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); - if (value == NULL) { - IRDA_WARNING("%s: Unable to kmalloc!\n", __func__); + if (value == NULL) return NULL; - } value->type = IAS_INTEGER; value->len = 4; @@ -467,16 +463,14 @@ struct ias_value *irias_new_string_value(char *string) struct ias_value *value; value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); - if (value == NULL) { - IRDA_WARNING("%s: Unable to kmalloc!\n", __func__); + if (value == NULL) return NULL; - } value->type = IAS_STRING; value->charset = CS_ASCII; value->t.string = kstrndup(string, IAS_MAX_STRING, GFP_ATOMIC); if (!value->t.string) { - IRDA_WARNING("%s: Unable to kmalloc!\n", __func__); + net_warn_ratelimited("%s: Unable to kmalloc!\n", __func__); kfree(value); return NULL; } @@ -498,10 +492,8 @@ struct ias_value *irias_new_octseq_value(__u8 *octseq , int len) struct ias_value *value; value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); - if (value == NULL) { - IRDA_WARNING("%s: Unable to kmalloc!\n", __func__); + if (value == NULL) return NULL; - } value->type = IAS_OCT_SEQ; /* Check length */ @@ -511,7 +503,7 @@ struct ias_value *irias_new_octseq_value(__u8 *octseq , int len) value->t.oct_seq = kmemdup(octseq, len, GFP_ATOMIC); if (value->t.oct_seq == NULL){ - IRDA_WARNING("%s: Unable to kmalloc!\n", __func__); + net_warn_ratelimited("%s: Unable to kmalloc!\n", __func__); kfree(value); return NULL; } @@ -523,10 +515,8 @@ struct ias_value *irias_new_missing_value(void) struct ias_value *value; value = kzalloc(sizeof(struct ias_value), GFP_ATOMIC); - if (value == NULL) { - IRDA_WARNING("%s: Unable to kmalloc!\n", __func__); + if (value == NULL) return NULL; - } value->type = IAS_MISSING; @@ -541,8 +531,6 @@ struct ias_value *irias_new_missing_value(void) */ void irias_delete_value(struct ias_value *value) { - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(value != NULL, return;); switch (value->type) { @@ -559,7 +547,7 @@ void irias_delete_value(struct ias_value *value) kfree(value->t.oct_seq); break; default: - IRDA_DEBUG(0, "%s(), Unknown value type!\n", __func__); + pr_debug("%s(), Unknown value type!\n", __func__); break; } 
kfree(value); diff --git a/net/irda/irlan/irlan_client.c b/net/irda/irlan/irlan_client.c index 42cf1390ce9c..c5837a40c78e 100644 --- a/net/irda/irlan/irlan_client.c +++ b/net/irda/irlan/irlan_client.c @@ -72,8 +72,6 @@ static void irlan_client_kick_timer_expired(void *data) { struct irlan_cb *self = (struct irlan_cb *) data; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -91,8 +89,6 @@ static void irlan_client_kick_timer_expired(void *data) static void irlan_client_start_kick_timer(struct irlan_cb *self, int timeout) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - irda_start_timer(&self->client.kick_timer, timeout, (void *) self, irlan_client_kick_timer_expired); } @@ -105,8 +101,6 @@ static void irlan_client_start_kick_timer(struct irlan_cb *self, int timeout) */ void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr) { - IRDA_DEBUG(1, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -117,7 +111,7 @@ void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr) if ((self->client.state != IRLAN_IDLE) || (self->provider.access_type == ACCESS_DIRECT)) { - IRDA_DEBUG(0, "%s(), already awake!\n", __func__ ); + pr_debug("%s(), already awake!\n", __func__); return; } @@ -126,7 +120,7 @@ void irlan_client_wakeup(struct irlan_cb *self, __u32 saddr, __u32 daddr) self->daddr = daddr; if (self->disconnect_reason == LM_USER_REQUEST) { - IRDA_DEBUG(0, "%s(), still stopped by user\n", __func__ ); + pr_debug("%s(), still stopped by user\n", __func__); return; } @@ -153,8 +147,6 @@ void irlan_client_discovery_indication(discinfo_t *discovery, struct irlan_cb *self; __u32 saddr, daddr; - IRDA_DEBUG(1, "%s()\n", __func__ ); - IRDA_ASSERT(discovery != NULL, return;); /* @@ -175,8 +167,8 @@ void irlan_client_discovery_indication(discinfo_t *discovery, if (self) { IRDA_ASSERT(self->magic == IRLAN_MAGIC, goto out;); - IRDA_DEBUG(1, "%s(), Found instance (%08x)!\n", __func__ , - daddr); + pr_debug("%s(), Found instance (%08x)!\n", __func__ , + daddr); irlan_client_wakeup(self, saddr, daddr); } @@ -195,8 +187,6 @@ static int irlan_client_ctrl_data_indication(void *instance, void *sap, { struct irlan_cb *self; - IRDA_DEBUG(2, "%s()\n", __func__ ); - self = instance; IRDA_ASSERT(self != NULL, return -1;); @@ -206,7 +196,7 @@ static int irlan_client_ctrl_data_indication(void *instance, void *sap, irlan_do_client_event(self, IRLAN_DATA_INDICATION, skb); /* Ready for a new command */ - IRDA_DEBUG(2, "%s(), clearing tx_busy\n", __func__ ); + pr_debug("%s(), clearing tx_busy\n", __func__); self->client.tx_busy = FALSE; /* Check if we have some queued commands waiting to be sent */ @@ -223,7 +213,7 @@ static void irlan_client_ctrl_disconnect_indication(void *instance, void *sap, struct tsap_cb *tsap; struct sk_buff *skb; - IRDA_DEBUG(4, "%s(), reason=%d\n", __func__ , reason); + pr_debug("%s(), reason=%d\n", __func__ , reason); self = instance; tsap = sap; @@ -255,8 +245,6 @@ static void irlan_client_open_ctrl_tsap(struct irlan_cb *self) struct tsap_cb *tsap; notify_t notify; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -275,7 +263,7 @@ static void irlan_client_open_ctrl_tsap(struct irlan_cb *self) tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, &notify); if (!tsap) { - IRDA_DEBUG(2, "%s(), Got no tsap!\n", __func__ ); + pr_debug("%s(), Got no tsap!\n", __func__);
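For context on the irttp_open_tsap() calls above: the notify_t carries the callbacks and the instance pointer that IrTTP invokes on events for this TSAP. A sketch of the usual setup, assuming the notify_t fields used elsewhere in this patch (data_indication, instance, name); the demo_* identifiers are illustrative, not from the patch:

#include <linux/skbuff.h>
#include <linux/string.h>
#include <net/irda/irda.h>
#include <net/irda/irttp.h>

/* Placeholder handler; real code wires up callbacks such as
 * irlan_client_ctrl_data_indication() shown above. */
static int demo_data_indication(void *instance, void *sap,
				struct sk_buff *skb)
{
	return 0;
}

static struct tsap_cb *demo_open_ctrl_tsap(void *instance)
{
	notify_t notify;

	irda_notify_init(&notify);	/* clear the unused callbacks */
	notify.data_indication = demo_data_indication;
	notify.instance = instance;
	strlcpy(notify.name, "demo ctrl", sizeof(notify.name));

	/* LSAP_ANY lets IrTTP pick a free source TSAP selector */
	return irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, &notify);
}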
return; } self->client.tsap_ctrl = tsap; @@ -295,8 +283,6 @@ static void irlan_client_ctrl_connect_confirm(void *instance, void *sap, { struct irlan_cb *self; - IRDA_DEBUG(4, "%s()\n", __func__ ); - self = instance; IRDA_ASSERT(self != NULL, return;); @@ -323,34 +309,34 @@ static void print_ret_code(__u8 code) printk(KERN_INFO "Success\n"); break; case 1: - IRDA_WARNING("IrLAN: Insufficient resources\n"); + net_warn_ratelimited("IrLAN: Insufficient resources\n"); break; case 2: - IRDA_WARNING("IrLAN: Invalid command format\n"); + net_warn_ratelimited("IrLAN: Invalid command format\n"); break; case 3: - IRDA_WARNING("IrLAN: Command not supported\n"); + net_warn_ratelimited("IrLAN: Command not supported\n"); break; case 4: - IRDA_WARNING("IrLAN: Parameter not supported\n"); + net_warn_ratelimited("IrLAN: Parameter not supported\n"); break; case 5: - IRDA_WARNING("IrLAN: Value not supported\n"); + net_warn_ratelimited("IrLAN: Value not supported\n"); break; case 6: - IRDA_WARNING("IrLAN: Not open\n"); + net_warn_ratelimited("IrLAN: Not open\n"); break; case 7: - IRDA_WARNING("IrLAN: Authentication required\n"); + net_warn_ratelimited("IrLAN: Authentication required\n"); break; case 8: - IRDA_WARNING("IrLAN: Invalid password\n"); + net_warn_ratelimited("IrLAN: Invalid password\n"); break; case 9: - IRDA_WARNING("IrLAN: Protocol error\n"); + net_warn_ratelimited("IrLAN: Protocol error\n"); break; case 255: - IRDA_WARNING("IrLAN: Asynchronous status\n"); + net_warn_ratelimited("IrLAN: Asynchronous status\n"); break; } } @@ -374,13 +360,13 @@ void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb) IRDA_ASSERT(skb != NULL, return;); - IRDA_DEBUG(4, "%s() skb->len=%d\n", __func__ , (int) skb->len); + pr_debug("%s() skb->len=%d\n", __func__ , (int)skb->len); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); if (!skb) { - IRDA_ERROR("%s(), Got NULL skb!\n", __func__); + net_err_ratelimited("%s(), Got NULL skb!\n", __func__); return; } frame = skb->data; @@ -405,7 +391,7 @@ void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb) /* How many parameters? 
*/ count = frame[1]; - IRDA_DEBUG(4, "%s(), got %d parameters\n", __func__ , count); + pr_debug("%s(), got %d parameters\n", __func__ , count); ptr = frame+2; @@ -413,7 +399,7 @@ void irlan_client_parse_response(struct irlan_cb *self, struct sk_buff *skb) for (i=0; i<count;i++) { ret = irlan_extract_param(ptr, name, value, &val_len); if (ret < 0) { - IRDA_DEBUG(2, "%s(), IrLAN, Error!\n", __func__ ); + pr_debug("%s(), IrLAN, Error!\n", __func__); break; } ptr += ret; @@ -437,7 +423,7 @@ static void irlan_check_response_param(struct irlan_cb *self, char *param, __u8 *bytes; int i; - IRDA_DEBUG(4, "%s(), parm=%s\n", __func__ , param); + pr_debug("%s(), parm=%s\n", __func__ , param); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -475,13 +461,13 @@ static void irlan_check_response_param(struct irlan_cb *self, char *param, else if (strcmp(value, "HOSTED") == 0) self->client.access_type = ACCESS_HOSTED; else { - IRDA_DEBUG(2, "%s(), unknown access type!\n", __func__ ); + pr_debug("%s(), unknown access type!\n", __func__); } } /* IRLAN version */ if (strcmp(param, "IRLAN_VER") == 0) { - IRDA_DEBUG(4, "IrLAN version %d.%d\n", (__u8) value[0], - (__u8) value[1]); + pr_debug("IrLAN version %d.%d\n", (__u8)value[0], + (__u8)value[1]); self->version[0] = value[0]; self->version[1] = value[1]; @@ -490,37 +476,37 @@ static void irlan_check_response_param(struct irlan_cb *self, char *param, /* Which remote TSAP to use for data channel */ if (strcmp(param, "DATA_CHAN") == 0) { self->dtsap_sel_data = value[0]; - IRDA_DEBUG(4, "Data TSAP = %02x\n", self->dtsap_sel_data); + pr_debug("Data TSAP = %02x\n", self->dtsap_sel_data); return; } if (strcmp(param, "CON_ARB") == 0) { memcpy(&tmp_cpu, value, 2); /* Align value */ le16_to_cpus(&tmp_cpu); /* Convert to host order */ self->client.recv_arb_val = tmp_cpu; - IRDA_DEBUG(2, "%s(), receive arb val=%d\n", __func__ , - self->client.recv_arb_val); + pr_debug("%s(), receive arb val=%d\n", __func__ , + self->client.recv_arb_val); } if (strcmp(param, "MAX_FRAME") == 0) { memcpy(&tmp_cpu, value, 2); /* Align value */ le16_to_cpus(&tmp_cpu); /* Convert to host order */ self->client.max_frame = tmp_cpu; - IRDA_DEBUG(4, "%s(), max frame=%d\n", __func__ , - self->client.max_frame); + pr_debug("%s(), max frame=%d\n", __func__ , + self->client.max_frame); } /* RECONNECT_KEY, in case the link goes down! */ if (strcmp(param, "RECONNECT_KEY") == 0) { - IRDA_DEBUG(4, "Got reconnect key: "); + pr_debug("Got reconnect key: "); /* for (i = 0; i < val_len; i++) */ /* printk("%02x", value[i]); */ memcpy(self->client.reconnect_key, value, val_len); self->client.key_len = val_len; - IRDA_DEBUG(4, "\n"); + pr_debug("\n"); } /* FILTER_ENTRY, have we got an ethernet address? 
*/ if (strcmp(param, "FILTER_ENTRY") == 0) { bytes = value; - IRDA_DEBUG(4, "Ethernet address = %pM\n", bytes); + pr_debug("Ethernet address = %pM\n", bytes); for (i = 0; i < 6; i++) self->dev->dev_addr[i] = bytes[i]; } @@ -537,8 +523,6 @@ void irlan_client_get_value_confirm(int result, __u16 obj_id, { struct irlan_cb *self; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(priv != NULL, return;); self = priv; @@ -550,7 +534,7 @@ void irlan_client_get_value_confirm(int result, __u16 obj_id, /* Check if request succeeded */ if (result != IAS_SUCCESS) { - IRDA_DEBUG(2, "%s(), got NULL value!\n", __func__ ); + pr_debug("%s(), got NULL value!\n", __func__); irlan_do_client_event(self, IRLAN_IAS_PROVIDER_NOT_AVAIL, NULL); return; @@ -568,7 +552,7 @@ void irlan_client_get_value_confirm(int result, __u16 obj_id, irias_delete_value(value); break; default: - IRDA_DEBUG(2, "%s(), unknown type!\n", __func__ ); + pr_debug("%s(), unknown type!\n", __func__); break; } irlan_do_client_event(self, IRLAN_IAS_PROVIDER_NOT_AVAIL, NULL); diff --git a/net/irda/irlan/irlan_client_event.c b/net/irda/irlan/irlan_client_event.c index 8d5a8ebc444f..cc93fabbbb19 100644 --- a/net/irda/irlan/irlan_client_event.c +++ b/net/irda/irlan/irlan_client_event.c @@ -92,16 +92,14 @@ void irlan_do_client_event(struct irlan_cb *self, IRLAN_EVENT event, static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); switch (event) { case IRLAN_DISCOVERY_INDICATION: if (self->client.iriap) { - IRDA_WARNING("%s(), busy with a previous query\n", - __func__); + net_warn_ratelimited("%s(), busy with a previous query\n", + __func__); return -EBUSY; } @@ -114,10 +112,10 @@ static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event, "IrLAN", "IrDA:TinyTP:LsapSel"); break; case IRLAN_WATCHDOG_TIMEOUT: - IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); + pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__); break; default: - IRDA_DEBUG(4, "%s(), Unknown event %d\n", __func__ , event); + pr_debug("%s(), Unknown event %d\n", __func__ , event); break; } if (skb) @@ -136,8 +134,6 @@ static int irlan_client_state_idle(struct irlan_cb *self, IRLAN_EVENT event, static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); @@ -154,7 +150,7 @@ static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_client_state(self, IRLAN_CONN); break; case IRLAN_IAS_PROVIDER_NOT_AVAIL: - IRDA_DEBUG(2, "%s(), IAS_PROVIDER_NOT_AVAIL\n", __func__ ); + pr_debug("%s(), IAS_PROVIDER_NOT_AVAIL\n", __func__); irlan_next_client_state(self, IRLAN_IDLE); /* Give the client a kick! 
*/ @@ -167,10 +163,10 @@ static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: - IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); + pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__); break; default: - IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); + pr_debug("%s(), Unknown event %d\n", __func__ , event); break; } if (skb) @@ -189,8 +185,6 @@ static int irlan_client_state_query(struct irlan_cb *self, IRLAN_EVENT event, static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); switch (event) { @@ -204,10 +198,10 @@ static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: - IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); + pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__); break; default: - IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); + pr_debug("%s(), Unknown event %d\n", __func__ , event); break; } if (skb) @@ -224,8 +218,6 @@ static int irlan_client_state_conn(struct irlan_cb *self, IRLAN_EVENT event, static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); switch (event) { @@ -244,10 +236,10 @@ static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: - IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); + pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__); break; default: - IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); + pr_debug("%s(), Unknown event %d\n", __func__ , event); break; } if (skb) @@ -266,8 +258,6 @@ static int irlan_client_state_info(struct irlan_cb *self, IRLAN_EVENT event, static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); switch(event) { @@ -281,10 +271,10 @@ static int irlan_client_state_media(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: - IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); + pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__); break; default: - IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); + pr_debug("%s(), Unknown event %d\n", __func__ , event); break; } if (skb) @@ -305,8 +295,6 @@ static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event, { struct qos_info qos; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); switch(event) { @@ -344,7 +332,7 @@ static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_client_state(self, IRLAN_DATA); break; default: - IRDA_DEBUG(2, "%s(), unknown access type!\n", __func__ ); + pr_debug("%s(), unknown access type!\n", __func__); break; } break; @@ -353,10 +341,10 @@ static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: - IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); + pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__); break; default: - IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ 
, event); + pr_debug("%s(), Unknown event %d\n", __func__ , event); break; } @@ -376,8 +364,6 @@ static int irlan_client_state_open(struct irlan_cb *self, IRLAN_EVENT event, static int irlan_client_state_wait(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); switch(event) { @@ -390,10 +376,10 @@ static int irlan_client_state_wait(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: - IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); + pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__); break; default: - IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); + pr_debug("%s(), Unknown event %d\n", __func__ , event); break; } if (skb) @@ -407,8 +393,6 @@ static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event, { struct qos_info qos; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); switch(event) { @@ -429,7 +413,7 @@ static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event, } else if (self->client.recv_arb_val > self->provider.send_arb_val) { - IRDA_DEBUG(2, "%s(), lost the battle :-(\n", __func__ ); + pr_debug("%s(), lost the battle :-(\n", __func__); } break; case IRLAN_DATA_CONNECT_INDICATION: @@ -440,10 +424,10 @@ static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_client_state(self, IRLAN_IDLE); break; case IRLAN_WATCHDOG_TIMEOUT: - IRDA_DEBUG(2, "%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__ ); + pr_debug("%s(), IRLAN_WATCHDOG_TIMEOUT\n", __func__); break; default: - IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); + pr_debug("%s(), Unknown event %d\n", __func__ , event); break; } if (skb) @@ -462,8 +446,6 @@ static int irlan_client_state_arb(struct irlan_cb *self, IRLAN_EVENT event, static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); @@ -476,7 +458,7 @@ static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_client_state(self, IRLAN_IDLE); break; default: - IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); + pr_debug("%s(), Unknown event %d\n", __func__ , event); break; } if (skb) @@ -494,8 +476,6 @@ static int irlan_client_state_data(struct irlan_cb *self, IRLAN_EVENT event, static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(2, "%s()\n", __func__ ); - if (skb) dev_kfree_skb(skb); @@ -511,8 +491,6 @@ static int irlan_client_state_close(struct irlan_cb *self, IRLAN_EVENT event, static int irlan_client_state_sync(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(2, "%s()\n", __func__ ); - if (skb) dev_kfree_skb(skb); diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c index 5a2d0a695529..481bbc2a4349 100644 --- a/net/irda/irlan/irlan_common.c +++ b/net/irda/irlan/irlan_common.c @@ -118,8 +118,6 @@ static int __init irlan_init(void) struct irlan_cb *new; __u16 hints; - IRDA_DEBUG(2, "%s()\n", __func__ ); - #ifdef CONFIG_PROC_FS { struct proc_dir_entry *proc; proc = proc_create("irlan", 0, proc_irda, &irlan_fops); @@ -130,7 +128,6 @@ static int __init irlan_init(void) } #endif /* CONFIG_PROC_FS */ - IRDA_DEBUG(4, "%s()\n", __func__ ); hints = irlmp_service_to_hint(S_LAN); /* 
Register with IrLMP as a client */ @@ -173,8 +170,6 @@ static void __exit irlan_cleanup(void) { struct irlan_cb *self, *next; - IRDA_DEBUG(4, "%s()\n", __func__ ); - irlmp_unregister_client(ckey); irlmp_unregister_service(skey); @@ -201,8 +196,6 @@ static struct irlan_cb __init *irlan_open(__u32 saddr, __u32 daddr) struct net_device *dev; struct irlan_cb *self; - IRDA_DEBUG(2, "%s()\n", __func__ ); - /* Create network device with irlan */ dev = alloc_irlandev(eth ? "eth%d" : "irlan%d"); if (!dev) @@ -245,8 +238,8 @@ static struct irlan_cb __init *irlan_open(__u32 saddr, __u32 daddr) irlan_next_provider_state(self, IRLAN_IDLE); if (register_netdev(dev)) { - IRDA_DEBUG(2, "%s(), register_netdev() failed!\n", - __func__ ); + pr_debug("%s(), register_netdev() failed!\n", + __func__); self = NULL; free_netdev(dev); } else { @@ -266,8 +259,6 @@ static struct irlan_cb __init *irlan_open(__u32 saddr, __u32 daddr) */ static void __irlan_close(struct irlan_cb *self) { - IRDA_DEBUG(2, "%s()\n", __func__ ); - ASSERT_RTNL(); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -314,8 +305,6 @@ static void irlan_connect_indication(void *instance, void *sap, struct irlan_cb *self; struct tsap_cb *tsap; - IRDA_DEBUG(2, "%s()\n", __func__ ); - self = instance; tsap = sap; @@ -326,7 +315,7 @@ static void irlan_connect_indication(void *instance, void *sap, self->max_sdu_size = max_sdu_size; self->max_header_size = max_header_size; - IRDA_DEBUG(0, "%s: We are now connected!\n", __func__); + pr_debug("%s: We are now connected!\n", __func__); del_timer(&self->watchdog_timer); @@ -370,7 +359,7 @@ static void irlan_connect_confirm(void *instance, void *sap, /* TODO: we could set the MTU depending on the max_sdu_size */ - IRDA_DEBUG(0, "%s: We are now connected!\n", __func__); + pr_debug("%s: We are now connected!\n", __func__); del_timer(&self->watchdog_timer); /* @@ -403,7 +392,7 @@ static void irlan_disconnect_indication(void *instance, struct irlan_cb *self; struct tsap_cb *tsap; - IRDA_DEBUG(0, "%s(), reason=%d\n", __func__ , reason); + pr_debug("%s(), reason=%d\n", __func__ , reason); self = instance; tsap = sap; @@ -415,29 +404,30 @@ static void irlan_disconnect_indication(void *instance, IRDA_ASSERT(tsap == self->tsap_data, return;); - IRDA_DEBUG(2, "IrLAN, data channel disconnected by peer!\n"); + pr_debug("IrLAN, data channel disconnected by peer!\n"); /* Save reason so we know if we should try to reconnect or not */ self->disconnect_reason = reason; switch (reason) { case LM_USER_REQUEST: /* User request */ - IRDA_DEBUG(2, "%s(), User requested\n", __func__ ); + pr_debug("%s(), User requested\n", __func__); break; case LM_LAP_DISCONNECT: /* Unexpected IrLAP disconnect */ - IRDA_DEBUG(2, "%s(), Unexpected IrLAP disconnect\n", __func__ ); + pr_debug("%s(), Unexpected IrLAP disconnect\n", __func__); break; case LM_CONNECT_FAILURE: /* Failed to establish IrLAP connection */ - IRDA_DEBUG(2, "%s(), IrLAP connect failed\n", __func__ ); + pr_debug("%s(), IrLAP connect failed\n", __func__); break; case LM_LAP_RESET: /* IrLAP reset */ - IRDA_DEBUG(2, "%s(), IrLAP reset\n", __func__ ); + pr_debug("%s(), IrLAP reset\n", __func__); break; case LM_INIT_DISCONNECT: - IRDA_DEBUG(2, "%s(), IrLMP connect failed\n", __func__ ); + pr_debug("%s(), IrLMP connect failed\n", __func__); break; default: - IRDA_ERROR("%s(), Unknown disconnect reason\n", __func__); + net_err_ratelimited("%s(), Unknown disconnect reason\n", + __func__); break; } @@ -459,8 +449,6 @@ void 
irlan_open_data_tsap(struct irlan_cb *self) struct tsap_cb *tsap; notify_t notify; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -481,7 +469,7 @@ void irlan_open_data_tsap(struct irlan_cb *self) tsap = irttp_open_tsap(LSAP_ANY, DEFAULT_INITIAL_CREDIT, &notify); if (!tsap) { - IRDA_DEBUG(2, "%s(), Got no tsap!\n", __func__ ); + pr_debug("%s(), Got no tsap!\n", __func__); return; } self->tsap_data = tsap; @@ -495,8 +483,6 @@ void irlan_open_data_tsap(struct irlan_cb *self) void irlan_close_tsaps(struct irlan_cb *self) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -585,8 +571,6 @@ int irlan_run_ctrl_tx_queue(struct irlan_cb *self) { struct sk_buff *skb; - IRDA_DEBUG(2, "%s()\n", __func__ ); - if (irda_lock(&self->client.tx_busy) == FALSE) return -EBUSY; @@ -604,7 +588,7 @@ int irlan_run_ctrl_tx_queue(struct irlan_cb *self) dev_kfree_skb(skb); return -1; } - IRDA_DEBUG(2, "%s(), sending ...\n", __func__ ); + pr_debug("%s(), sending ...\n", __func__); return irttp_data_request(self->client.tsap_ctrl, skb); } @@ -617,8 +601,6 @@ int irlan_run_ctrl_tx_queue(struct irlan_cb *self) */ static void irlan_ctrl_data_request(struct irlan_cb *self, struct sk_buff *skb) { - IRDA_DEBUG(2, "%s()\n", __func__ ); - /* Queue command */ skb_queue_tail(&self->client.txq, skb); @@ -637,8 +619,6 @@ void irlan_get_provider_info(struct irlan_cb *self) struct sk_buff *skb; __u8 *frame; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -670,8 +650,6 @@ void irlan_open_data_channel(struct irlan_cb *self) struct sk_buff *skb; __u8 *frame; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -705,8 +683,6 @@ void irlan_close_data_channel(struct irlan_cb *self) struct sk_buff *skb; __u8 *frame; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -746,8 +722,6 @@ static void irlan_open_unicast_addr(struct irlan_cb *self) struct sk_buff *skb; __u8 *frame; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -788,8 +762,6 @@ void irlan_set_broadcast_filter(struct irlan_cb *self, int status) struct sk_buff *skb; __u8 *frame; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -832,8 +804,6 @@ void irlan_set_multicast_filter(struct irlan_cb *self, int status) struct sk_buff *skb; __u8 *frame; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -877,8 +847,6 @@ static void irlan_get_unicast_addr(struct irlan_cb *self) struct sk_buff *skb; __u8 *frame; - IRDA_DEBUG(2, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -917,8 +885,6 @@ void irlan_get_media_char(struct irlan_cb *self) struct sk_buff *skb; __u8 *frame; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -1005,7 +971,7 @@ static int __irlan_insert_param(struct sk_buff *skb, char *param, int type, int n=0; if (skb == NULL) { - IRDA_DEBUG(2, "%s(), Got NULL skb\n", __func__ ); + pr_debug("%s(), Got NULL skb\n",
__func__); return 0; } @@ -1022,7 +988,7 @@ static int __irlan_insert_param(struct sk_buff *skb, char *param, int type, IRDA_ASSERT(value_len > 0, return 0;); break; default: - IRDA_DEBUG(2, "%s(), Unknown parameter type!\n", __func__ ); + pr_debug("%s(), Unknown parameter type!\n", __func__); return 0; } @@ -1031,7 +997,7 @@ static int __irlan_insert_param(struct sk_buff *skb, char *param, int type, /* Make space for data */ if (skb_tailroom(skb) < (param_len+value_len+3)) { - IRDA_DEBUG(2, "%s(), No more space at end of skb\n", __func__ ); + pr_debug("%s(), No more space at end of skb\n", __func__); return 0; } skb_put(skb, param_len+value_len+3); @@ -1078,13 +1044,11 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len) __u16 val_len; int n=0; - IRDA_DEBUG(4, "%s()\n", __func__ ); - /* get length of parameter name (1 byte) */ name_len = buf[n++]; if (name_len > 254) { - IRDA_DEBUG(2, "%s(), name_len > 254\n", __func__ ); + pr_debug("%s(), name_len > 254\n", __func__); return -RSP_INVALID_COMMAND_FORMAT; } @@ -1101,7 +1065,7 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len) le16_to_cpus(&val_len); n+=2; if (val_len >= 1016) { - IRDA_DEBUG(2, "%s(), parameter length to long\n", __func__ ); + pr_debug("%s(), parameter length to long\n", __func__); return -RSP_INVALID_COMMAND_FORMAT; } *len = val_len; @@ -1111,8 +1075,8 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len) value[val_len] = '\0'; n+=val_len; - IRDA_DEBUG(4, "Parameter: %s ", name); - IRDA_DEBUG(4, "Value: %s\n", value); + pr_debug("Parameter: %s ", name); + pr_debug("Value: %s\n", value); return n; } diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c index dc13f1a45f2f..fcfbe579434a 100644 --- a/net/irda/irlan/irlan_eth.c +++ b/net/irda/irlan/irlan_eth.c @@ -110,8 +110,6 @@ static int irlan_eth_open(struct net_device *dev) { struct irlan_cb *self = netdev_priv(dev); - IRDA_DEBUG(2, "%s()\n", __func__); - /* Ready to play! */ netif_stop_queue(dev); /* Wait until data link is ready */ @@ -137,8 +135,6 @@ static int irlan_eth_close(struct net_device *dev) { struct irlan_cb *self = netdev_priv(dev); - IRDA_DEBUG(2, "%s()\n", __func__); - /* Stop device */ netif_stop_queue(dev); @@ -231,8 +227,8 @@ int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb) return 0; } if (skb->len < ETH_HLEN) { - IRDA_DEBUG(0, "%s() : IrLAN frame too short (%d)\n", - __func__, skb->len); + pr_debug("%s() : IrLAN frame too short (%d)\n", + __func__, skb->len); dev->stats.rx_dropped++; dev_kfree_skb(skb); return 0; @@ -281,9 +277,9 @@ void irlan_eth_flow_indication(void *instance, void *sap, LOCAL_FLOW flow) IRDA_ASSERT(dev != NULL, return;); - IRDA_DEBUG(0, "%s() : flow %s ; running %d\n", __func__, - flow == FLOW_STOP ? "FLOW_STOP" : "FLOW_START", - netif_running(dev)); + pr_debug("%s() : flow %s ; running %d\n", __func__, + flow == FLOW_STOP ? 
"FLOW_STOP" : "FLOW_START", + netif_running(dev)); switch (flow) { case FLOW_STOP: @@ -310,32 +306,30 @@ static void irlan_eth_set_multicast_list(struct net_device *dev) { struct irlan_cb *self = netdev_priv(dev); - IRDA_DEBUG(2, "%s()\n", __func__); - /* Check if data channel has been connected yet */ if (self->client.state != IRLAN_DATA) { - IRDA_DEBUG(1, "%s(), delaying!\n", __func__); + pr_debug("%s(), delaying!\n", __func__); return; } if (dev->flags & IFF_PROMISC) { /* Enable promiscuous mode */ - IRDA_WARNING("Promiscuous mode not implemented by IrLAN!\n"); + net_warn_ratelimited("Promiscuous mode not implemented by IrLAN!\n"); } else if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > HW_MAX_ADDRS) { /* Disable promiscuous mode, use normal mode. */ - IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__); + pr_debug("%s(), Setting multicast filter\n", __func__); /* hardware_set_filter(NULL); */ irlan_set_multicast_filter(self, TRUE); } else if (!netdev_mc_empty(dev)) { - IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__); + pr_debug("%s(), Setting multicast filter\n", __func__); /* Walk the address list, and load the filter */ /* hardware_set_filter(dev->mc_list); */ irlan_set_multicast_filter(self, TRUE); } else { - IRDA_DEBUG(4, "%s(), Clearing multicast filter\n", __func__); + pr_debug("%s(), Clearing multicast filter\n", __func__); irlan_set_multicast_filter(self, FALSE); } diff --git a/net/irda/irlan/irlan_event.c b/net/irda/irlan/irlan_event.c index 43f16040a6fe..9a1cc11c16f6 100644 --- a/net/irda/irlan/irlan_event.c +++ b/net/irda/irlan/irlan_event.c @@ -40,7 +40,7 @@ const char * const irlan_state[] = { void irlan_next_client_state(struct irlan_cb *self, IRLAN_STATE state) { - IRDA_DEBUG(2, "%s(), %s\n", __func__ , irlan_state[state]); + pr_debug("%s(), %s\n", __func__ , irlan_state[state]); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -50,7 +50,7 @@ void irlan_next_client_state(struct irlan_cb *self, IRLAN_STATE state) void irlan_next_provider_state(struct irlan_cb *self, IRLAN_STATE state) { - IRDA_DEBUG(2, "%s(), %s\n", __func__ , irlan_state[state]); + pr_debug("%s(), %s\n", __func__ , irlan_state[state]); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); diff --git a/net/irda/irlan/irlan_filter.c b/net/irda/irlan/irlan_filter.c index 7977be7caf0f..e755e90b2f26 100644 --- a/net/irda/irlan/irlan_filter.c +++ b/net/irda/irlan/irlan_filter.c @@ -43,7 +43,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb) if ((self->provider.filter_type == IRLAN_DIRECTED) && (self->provider.filter_operation == DYNAMIC)) { - IRDA_DEBUG(0, "Giving peer a dynamic Ethernet address\n"); + pr_debug("Giving peer a dynamic Ethernet address\n"); self->provider.mac_address[0] = 0x40; self->provider.mac_address[1] = 0x00; self->provider.mac_address[2] = 0x00; @@ -73,7 +73,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb) if ((self->provider.filter_type == IRLAN_DIRECTED) && (self->provider.filter_mode == FILTER)) { - IRDA_DEBUG(0, "Directed filter on\n"); + pr_debug("Directed filter on\n"); skb->data[0] = 0x00; /* Success */ skb->data[1] = 0x00; return; @@ -81,7 +81,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb) if ((self->provider.filter_type == IRLAN_DIRECTED) && (self->provider.filter_mode == NONE)) { - IRDA_DEBUG(0, "Directed filter off\n"); + pr_debug("Directed filter off\n"); skb->data[0] = 0x00; /* Success */ 
skb->data[1] = 0x00; return; @@ -90,7 +90,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb) if ((self->provider.filter_type == IRLAN_BROADCAST) && (self->provider.filter_mode == FILTER)) { - IRDA_DEBUG(0, "Broadcast filter on\n"); + pr_debug("Broadcast filter on\n"); skb->data[0] = 0x00; /* Success */ skb->data[1] = 0x00; return; @@ -98,7 +98,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb) if ((self->provider.filter_type == IRLAN_BROADCAST) && (self->provider.filter_mode == NONE)) { - IRDA_DEBUG(0, "Broadcast filter off\n"); + pr_debug("Broadcast filter off\n"); skb->data[0] = 0x00; /* Success */ skb->data[1] = 0x00; return; @@ -106,7 +106,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb) if ((self->provider.filter_type == IRLAN_MULTICAST) && (self->provider.filter_mode == FILTER)) { - IRDA_DEBUG(0, "Multicast filter on\n"); + pr_debug("Multicast filter on\n"); skb->data[0] = 0x00; /* Success */ skb->data[1] = 0x00; return; @@ -114,7 +114,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb) if ((self->provider.filter_type == IRLAN_MULTICAST) && (self->provider.filter_mode == NONE)) { - IRDA_DEBUG(0, "Multicast filter off\n"); + pr_debug("Multicast filter off\n"); skb->data[0] = 0x00; /* Success */ skb->data[1] = 0x00; return; @@ -122,7 +122,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb) if ((self->provider.filter_type == IRLAN_MULTICAST) && (self->provider.filter_operation == GET)) { - IRDA_DEBUG(0, "Multicast filter get\n"); + pr_debug("Multicast filter get\n"); skb->data[0] = 0x00; /* Success? */ skb->data[1] = 0x02; irlan_insert_string_param(skb, "FILTER_MODE", "NONE"); @@ -132,7 +132,7 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb) skb->data[0] = 0x00; /* Command not supported */ skb->data[1] = 0x00; - IRDA_DEBUG(0, "Not implemented!\n"); + pr_debug("Not implemented!\n"); } /* @@ -143,18 +143,15 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb) */ void irlan_check_command_param(struct irlan_cb *self, char *param, char *value) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); - IRDA_DEBUG(4, "%s, %s\n", param, value); + pr_debug("%s, %s\n", param, value); /* * This is experimental!! DB. 
*/ if (strcmp(param, "MODE") == 0) { - IRDA_DEBUG(0, "%s()\n", __func__ ); self->use_udata = TRUE; return; } diff --git a/net/irda/irlan/irlan_provider.c b/net/irda/irlan/irlan_provider.c index 4664855222f4..15c292cf2644 100644 --- a/net/irda/irlan/irlan_provider.c +++ b/net/irda/irlan/irlan_provider.c @@ -70,8 +70,6 @@ static int irlan_provider_data_indication(void *instance, void *sap, struct irlan_cb *self; __u8 code; - IRDA_DEBUG(4, "%s()\n", __func__ ); - self = instance; IRDA_ASSERT(self != NULL, return -1;); @@ -82,32 +80,32 @@ static int irlan_provider_data_indication(void *instance, void *sap, code = skb->data[0]; switch(code) { case CMD_GET_PROVIDER_INFO: - IRDA_DEBUG(4, "Got GET_PROVIDER_INFO command!\n"); + pr_debug("Got GET_PROVIDER_INFO command!\n"); irlan_do_provider_event(self, IRLAN_GET_INFO_CMD, skb); break; case CMD_GET_MEDIA_CHAR: - IRDA_DEBUG(4, "Got GET_MEDIA_CHAR command!\n"); + pr_debug("Got GET_MEDIA_CHAR command!\n"); irlan_do_provider_event(self, IRLAN_GET_MEDIA_CMD, skb); break; case CMD_OPEN_DATA_CHANNEL: - IRDA_DEBUG(4, "Got OPEN_DATA_CHANNEL command!\n"); + pr_debug("Got OPEN_DATA_CHANNEL command!\n"); irlan_do_provider_event(self, IRLAN_OPEN_DATA_CMD, skb); break; case CMD_FILTER_OPERATION: - IRDA_DEBUG(4, "Got FILTER_OPERATION command!\n"); + pr_debug("Got FILTER_OPERATION command!\n"); irlan_do_provider_event(self, IRLAN_FILTER_CONFIG_CMD, skb); break; case CMD_RECONNECT_DATA_CHAN: - IRDA_DEBUG(2, "%s(), Got RECONNECT_DATA_CHAN command\n", __func__ ); - IRDA_DEBUG(2, "%s(), NOT IMPLEMENTED\n", __func__ ); + pr_debug("%s(), Got RECONNECT_DATA_CHAN command\n", __func__); + pr_debug("%s(), NOT IMPLEMENTED\n", __func__); break; case CMD_CLOSE_DATA_CHAN: - IRDA_DEBUG(2, "Got CLOSE_DATA_CHAN command!\n"); - IRDA_DEBUG(2, "%s(), NOT IMPLEMENTED\n", __func__ ); + pr_debug("Got CLOSE_DATA_CHAN command!\n"); + pr_debug("%s(), NOT IMPLEMENTED\n", __func__); break; default: - IRDA_DEBUG(2, "%s(), Unknown command!\n", __func__ ); + pr_debug("%s(), Unknown command!\n", __func__); break; } return 0; @@ -128,8 +126,6 @@ static void irlan_provider_connect_indication(void *instance, void *sap, struct irlan_cb *self; struct tsap_cb *tsap; - IRDA_DEBUG(0, "%s()\n", __func__ ); - self = instance; tsap = sap; @@ -179,7 +175,7 @@ static void irlan_provider_disconnect_indication(void *instance, void *sap, struct irlan_cb *self; struct tsap_cb *tsap; - IRDA_DEBUG(4, "%s(), reason=%d\n", __func__ , reason); + pr_debug("%s(), reason=%d\n", __func__ , reason); self = instance; tsap = sap; @@ -233,7 +229,7 @@ int irlan_provider_parse_command(struct irlan_cb *self, int cmd, IRDA_ASSERT(skb != NULL, return -RSP_PROTOCOL_ERROR;); - IRDA_DEBUG(4, "%s(), skb->len=%d\n", __func__ , (int)skb->len); + pr_debug("%s(), skb->len=%d\n", __func__ , (int)skb->len); IRDA_ASSERT(self != NULL, return -RSP_PROTOCOL_ERROR;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -RSP_PROTOCOL_ERROR;); @@ -255,7 +251,7 @@ int irlan_provider_parse_command(struct irlan_cb *self, int cmd, /* How many parameters? 
*/ count = frame[1]; - IRDA_DEBUG(4, "Got %d parameters\n", count); + pr_debug("Got %d parameters\n", count); ptr = frame+2; @@ -263,7 +259,7 @@ int irlan_provider_parse_command(struct irlan_cb *self, int cmd, for (i=0; i<count;i++) { ret = irlan_extract_param(ptr, name, value, &val_len); if (ret < 0) { - IRDA_DEBUG(2, "%s(), IrLAN, Error!\n", __func__ ); + pr_debug("%s(), IrLAN, Error!\n", __func__); break; } ptr+=ret; @@ -288,8 +284,6 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command, { struct sk_buff *skb; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;); @@ -320,7 +314,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command, irlan_insert_string_param(skb, "MEDIA", "802.5"); break; default: - IRDA_DEBUG(2, "%s(), unknown media type!\n", __func__ ); + pr_debug("%s(), unknown media type!\n", __func__); break; } irlan_insert_short_param(skb, "IRLAN_VER", 0x0101); @@ -344,7 +338,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command, irlan_insert_string_param(skb, "ACCESS_TYPE", "HOSTED"); break; default: - IRDA_DEBUG(2, "%s(), Unknown access type\n", __func__ ); + pr_debug("%s(), Unknown access type\n", __func__); break; } irlan_insert_short_param(skb, "MAX_FRAME", 0x05ee); @@ -364,7 +358,7 @@ void irlan_provider_send_reply(struct irlan_cb *self, int command, irlan_filter_request(self, skb); break; default: - IRDA_DEBUG(2, "%s(), Unknown command!\n", __func__ ); + pr_debug("%s(), Unknown command!\n", __func__); break; } @@ -382,8 +376,6 @@ int irlan_provider_open_ctrl_tsap(struct irlan_cb *self) struct tsap_cb *tsap; notify_t notify; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); @@ -403,7 +395,7 @@ int irlan_provider_open_ctrl_tsap(struct irlan_cb *self) tsap = irttp_open_tsap(LSAP_ANY, 1, &notify); if (!tsap) { - IRDA_DEBUG(2, "%s(), Got no tsap!\n", __func__ ); + pr_debug("%s(), Got no tsap!\n", __func__); return -1; } self->provider.tsap_ctrl = tsap; diff --git a/net/irda/irlan/irlan_provider_event.c b/net/irda/irlan/irlan_provider_event.c index 01a9d7c993ee..9c4f7f51d6b5 100644 --- a/net/irda/irlan/irlan_provider_event.c +++ b/net/irda/irlan/irlan_provider_event.c @@ -72,8 +72,6 @@ void irlan_do_provider_event(struct irlan_cb *self, IRLAN_EVENT event, static int irlan_provider_state_idle(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); switch(event) { @@ -82,7 +80,7 @@ static int irlan_provider_state_idle(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_provider_state( self, IRLAN_INFO); break; default: - IRDA_DEBUG(4, "%s(), Unknown event %d\n", __func__ , event); + pr_debug("%s(), Unknown event %d\n", __func__ , event); break; } if (skb) @@ -101,8 +99,6 @@ static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event, { int ret; - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); switch(event) { @@ -147,7 +143,7 @@ static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_provider_state(self, IRLAN_IDLE); break; default: - IRDA_DEBUG( 0, "%s(), Unknown event %d\n", __func__ , event); + pr_debug("%s(), Unknown event %d\n", __func__ , event); break; } if (skb) @@ -166,8 +162,6 @@ static int irlan_provider_state_info(struct irlan_cb *self, IRLAN_EVENT event, static int irlan_provider_state_open(struct irlan_cb
*self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); switch(event) { @@ -186,7 +180,7 @@ static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_provider_state(self, IRLAN_IDLE); break; default: - IRDA_DEBUG(2, "%s(), Unknown event %d\n", __func__ , event); + pr_debug("%s(), Unknown event %d\n", __func__ , event); break; } if (skb) @@ -205,8 +199,6 @@ static int irlan_provider_state_open(struct irlan_cb *self, IRLAN_EVENT event, static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__ ); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;); @@ -221,7 +213,7 @@ static int irlan_provider_state_data(struct irlan_cb *self, IRLAN_EVENT event, irlan_next_provider_state(self, IRLAN_IDLE); break; default: - IRDA_DEBUG( 0, "%s(), Unknown event %d\n", __func__ , event); + pr_debug("%s(), Unknown event %d\n", __func__ , event); break; } if (skb) diff --git a/net/irda/irlap.c b/net/irda/irlap.c index a778df55f5d6..7f2cafddfb6e 100644 --- a/net/irda/irlap.c +++ b/net/irda/irlap.c @@ -60,8 +60,7 @@ static void __irlap_close(struct irlap_cb *self); static void irlap_init_qos_capabilities(struct irlap_cb *self, struct qos_info *qos_user); -#ifdef CONFIG_IRDA_DEBUG -static const char *const lap_reasons[] = { +static const char *const lap_reasons[] __maybe_unused = { "ERROR, NOT USED", "LAP_DISC_INDICATION", "LAP_NO_RESPONSE", @@ -71,7 +70,6 @@ static const char *const lap_reasons[] = { "LAP_PRIMARY_CONFLICT", "ERROR, NOT USED", }; -#endif /* CONFIG_IRDA_DEBUG */ int __init irlap_init(void) { @@ -85,8 +83,8 @@ int __init irlap_init(void) /* Allocate master array */ irlap = hashbin_new(HB_LOCK); if (irlap == NULL) { - IRDA_ERROR("%s: can't allocate irlap hashbin!\n", - __func__); + net_err_ratelimited("%s: can't allocate irlap hashbin!\n", + __func__); return -ENOMEM; } @@ -111,8 +109,6 @@ struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos, { struct irlap_cb *self; - IRDA_DEBUG(4, "%s()\n", __func__); - /* Initialize the irlap structure. 
*/ self = kzalloc(sizeof(struct irlap_cb), GFP_KERNEL); if (self == NULL) @@ -213,8 +209,6 @@ void irlap_close(struct irlap_cb *self) { struct irlap_cb *lap; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -229,7 +223,7 @@ void irlap_close(struct irlap_cb *self) /* Be sure that we manage to remove ourself from the hash */ lap = hashbin_remove(irlap, self->saddr, NULL); if (!lap) { - IRDA_DEBUG(1, "%s(), Didn't find myself!\n", __func__); + pr_debug("%s(), Didn't find myself!\n", __func__); return; } __irlap_close(lap); @@ -244,8 +238,6 @@ EXPORT_SYMBOL(irlap_close); */ void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -263,8 +255,6 @@ void irlap_connect_indication(struct irlap_cb *self, struct sk_buff *skb) */ void irlap_connect_response(struct irlap_cb *self, struct sk_buff *userdata) { - IRDA_DEBUG(4, "%s()\n", __func__); - irlap_do_event(self, CONNECT_RESPONSE, userdata, NULL); } @@ -278,7 +268,7 @@ void irlap_connect_response(struct irlap_cb *self, struct sk_buff *userdata) void irlap_connect_request(struct irlap_cb *self, __u32 daddr, struct qos_info *qos_user, int sniff) { - IRDA_DEBUG(3, "%s(), daddr=0x%08x\n", __func__, daddr); + pr_debug("%s(), daddr=0x%08x\n", __func__, daddr); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -305,8 +295,6 @@ void irlap_connect_request(struct irlap_cb *self, __u32 daddr, */ void irlap_connect_confirm(struct irlap_cb *self, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -342,8 +330,6 @@ void irlap_data_request(struct irlap_cb *self, struct sk_buff *skb, IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); - IRDA_DEBUG(3, "%s()\n", __func__); - IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER), return;); skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER); @@ -389,8 +375,6 @@ void irlap_unitdata_request(struct irlap_cb *self, struct sk_buff *skb) IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); - IRDA_DEBUG(3, "%s()\n", __func__); - IRDA_ASSERT(skb_headroom(skb) >= (LAP_ADDR_HEADER+LAP_CTRL_HEADER), return;); skb_push(skb, LAP_ADDR_HEADER+LAP_CTRL_HEADER); @@ -415,8 +399,6 @@ void irlap_unitdata_request(struct irlap_cb *self, struct sk_buff *skb) #ifdef CONFIG_IRDA_ULTRA void irlap_unitdata_indication(struct irlap_cb *self, struct sk_buff *skb) { - IRDA_DEBUG(1, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); @@ -435,8 +417,6 @@ void irlap_unitdata_indication(struct irlap_cb *self, struct sk_buff *skb) */ void irlap_disconnect_request(struct irlap_cb *self) { - IRDA_DEBUG(3, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -456,7 +436,7 @@ void irlap_disconnect_request(struct irlap_cb *self) irlap_do_event(self, DISCONNECT_REQUEST, NULL, NULL); break; default: - IRDA_DEBUG(2, "%s(), disconnect pending!\n", __func__); + pr_debug("%s(), disconnect pending!\n", __func__); self->disconnect_pending = TRUE; break; } @@ -470,7 +450,7 @@ void irlap_disconnect_request(struct irlap_cb *self) */ void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON 
reason) { - IRDA_DEBUG(1, "%s(), reason=%s\n", __func__, lap_reasons[reason]); + pr_debug("%s(), reason=%s\n", __func__, lap_reasons[reason]); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -480,7 +460,7 @@ void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason) switch (reason) { case LAP_RESET_INDICATION: - IRDA_DEBUG(1, "%s(), Sending reset request!\n", __func__); + pr_debug("%s(), Sending reset request!\n", __func__); irlap_do_event(self, RESET_REQUEST, NULL, NULL); break; case LAP_NO_RESPONSE: /* FALLTHROUGH */ @@ -491,7 +471,8 @@ void irlap_disconnect_indication(struct irlap_cb *self, LAP_REASON reason) reason, NULL); break; default: - IRDA_ERROR("%s: Unknown reason %d\n", __func__, reason); + net_err_ratelimited("%s: Unknown reason %d\n", + __func__, reason); } } @@ -509,7 +490,7 @@ void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery) IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_ASSERT(discovery != NULL, return;); - IRDA_DEBUG(4, "%s(), nslots = %d\n", __func__, discovery->nslots); + pr_debug("%s(), nslots = %d\n", __func__, discovery->nslots); IRDA_ASSERT((discovery->nslots == 1) || (discovery->nslots == 6) || (discovery->nslots == 8) || (discovery->nslots == 16), @@ -517,8 +498,8 @@ void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery) /* Discovery is only possible in NDM mode */ if (self->state != LAP_NDM) { - IRDA_DEBUG(4, "%s(), discovery only possible in NDM mode\n", - __func__); + pr_debug("%s(), discovery only possible in NDM mode\n", + __func__); irlap_discovery_confirm(self, NULL); /* Note : in theory, if we are not in NDM, we could postpone * the discovery like we do for connection request. @@ -540,8 +521,8 @@ void irlap_discovery_request(struct irlap_cb *self, discovery_t *discovery) self->discovery_log = hashbin_new(HB_NOLOCK); if (self->discovery_log == NULL) { - IRDA_WARNING("%s(), Unable to allocate discovery log!\n", - __func__); + net_warn_ratelimited("%s(), Unable to allocate discovery log!\n", + __func__); return; } @@ -596,8 +577,6 @@ void irlap_discovery_confirm(struct irlap_cb *self, hashbin_t *discovery_log) */ void irlap_discovery_indication(struct irlap_cb *self, discovery_t *discovery) { - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_ASSERT(discovery != NULL, return;); @@ -625,10 +604,10 @@ void irlap_status_indication(struct irlap_cb *self, int quality_of_link) { switch (quality_of_link) { case STATUS_NO_ACTIVITY: - IRDA_MESSAGE("IrLAP, no activity on link!\n"); + net_info_ratelimited("IrLAP, no activity on link!\n"); break; case STATUS_NOISY: - IRDA_MESSAGE("IrLAP, noisy link!\n"); + net_info_ratelimited("IrLAP, noisy link!\n"); break; default: break; @@ -642,8 +621,6 @@ void irlap_status_indication(struct irlap_cb *self, int quality_of_link) */ void irlap_reset_indication(struct irlap_cb *self) { - IRDA_DEBUG(1, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -658,7 +635,6 @@ void irlap_reset_indication(struct irlap_cb *self) */ void irlap_reset_confirm(void) { - IRDA_DEBUG(1, "%s()\n", __func__); } /* @@ -758,7 +734,7 @@ int irlap_validate_nr_received(struct irlap_cb *self, int nr) { /* nr as expected? 
*/ if (nr == self->vs) { - IRDA_DEBUG(4, "%s(), expected!\n", __func__); + pr_debug("%s(), expected!\n", __func__); return NR_EXPECTED; } @@ -786,8 +762,6 @@ int irlap_validate_nr_received(struct irlap_cb *self, int nr) */ void irlap_initiate_connection_state(struct irlap_cb *self) { - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -869,7 +843,7 @@ static void irlap_change_speed(struct irlap_cb *self, __u32 speed, int now) { struct sk_buff *skb; - IRDA_DEBUG(0, "%s(), setting speed to %d\n", __func__, speed); + pr_debug("%s(), setting speed to %d\n", __func__, speed); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -912,7 +886,7 @@ static void irlap_init_qos_capabilities(struct irlap_cb *self, * user may not have set all of them. */ if (qos_user) { - IRDA_DEBUG(1, "%s(), Found user specified QoS!\n", __func__); + pr_debug("%s(), Found user specified QoS!\n", __func__); if (qos_user->baud_rate.bits) self->qos_rx.baud_rate.bits &= qos_user->baud_rate.bits; @@ -942,8 +916,6 @@ static void irlap_init_qos_capabilities(struct irlap_cb *self, */ void irlap_apply_default_connection_parameters(struct irlap_cb *self) { - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -1005,8 +977,6 @@ void irlap_apply_default_connection_parameters(struct irlap_cb *self) */ void irlap_apply_connection_parameters(struct irlap_cb *self, int now) { - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -1085,12 +1055,12 @@ void irlap_apply_connection_parameters(struct irlap_cb *self, int now) self->N1 = sysctl_warn_noreply_time * 1000 / self->qos_rx.max_turn_time.value; - IRDA_DEBUG(4, "Setting N1 = %d\n", self->N1); + pr_debug("Setting N1 = %d\n", self->N1); /* Set N2 to match our own disconnect time */ self->N2 = self->qos_tx.link_disc_time.value * 1000 / self->qos_rx.max_turn_time.value; - IRDA_DEBUG(4, "Setting N2 = %d\n", self->N2); + pr_debug("Setting N2 = %d\n", self->N2); } #ifdef CONFIG_PROC_FS diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c index ccd214f9d196..0e1b4d79f745 100644 --- a/net/irda/irlap_event.c +++ b/net/irda/irlap_event.c @@ -78,8 +78,7 @@ static int irlap_state_sclose (struct irlap_cb *self, IRLAP_EVENT event, static int irlap_state_reset_check(struct irlap_cb *, IRLAP_EVENT event, struct sk_buff *, struct irlap_info *); -#ifdef CONFIG_IRDA_DEBUG -static const char *const irlap_event[] = { +static const char *const irlap_event[] __maybe_unused = { "DISCOVERY_REQUEST", "CONNECT_REQUEST", "CONNECT_RESPONSE", @@ -119,7 +118,6 @@ static const char *const irlap_event[] = { "BACKOFF_TIMER_EXPIRED", "MEDIA_BUSY_TIMER_EXPIRED", }; -#endif /* CONFIG_IRDA_DEBUG */ const char *const irlap_state[] = { "LAP_NDM", @@ -218,7 +216,7 @@ static void irlap_start_poll_timer(struct irlap_cb *self, int timeout) } else self->fast_RR = FALSE; - IRDA_DEBUG(3, "%s(), timeout=%d (%ld)\n", __func__, timeout, jiffies); + pr_debug("%s(), timeout=%d (%ld)\n", __func__, timeout, jiffies); #endif /* CONFIG_IRDA_FAST_RR */ if (timeout == 0) @@ -242,8 +240,8 @@ void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event, if (!self || self->magic != LAP_MAGIC) return; - IRDA_DEBUG(3, "%s(), event = %s, state = %s\n", __func__, - irlap_event[event], irlap_state[self->state]); + pr_debug("%s(), event = %s, state = %s\n", __func__, + irlap_event[event], 
irlap_state[self->state]); ret = (*state[self->state])(self, event, skb, info); @@ -260,8 +258,8 @@ void irlap_do_event(struct irlap_cb *self, IRLAP_EVENT event, * try to disconnect link if we send any data frames, since * that will change the state away form XMIT */ - IRDA_DEBUG(2, "%s() : queue len = %d\n", __func__, - skb_queue_len(&self->txq)); + pr_debug("%s() : queue len = %d\n", __func__, + skb_queue_len(&self->txq)); if (!skb_queue_empty(&self->txq)) { /* Prevent race conditions with irlap_data_request() */ @@ -340,8 +338,8 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event, /* Note : this will never happen, because we test * media busy in irlap_connect_request() and * postpone the event... - Jean II */ - IRDA_DEBUG(0, "%s(), CONNECT_REQUEST: media busy!\n", - __func__); + pr_debug("%s(), CONNECT_REQUEST: media busy!\n", + __func__); /* Always switch state before calling upper layers */ irlap_next_state(self, LAP_NDM); @@ -367,16 +365,16 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event, irlap_connect_indication(self, skb); } else { - IRDA_DEBUG(0, "%s(), SNRM frame does not " - "contain an I field!\n", __func__); + pr_debug("%s(), SNRM frame does not contain an I field!\n", + __func__); } break; case DISCOVERY_REQUEST: IRDA_ASSERT(info != NULL, return -1;); if (self->media_busy) { - IRDA_DEBUG(1, "%s(), DISCOVERY_REQUEST: media busy!\n", - __func__); + pr_debug("%s(), DISCOVERY_REQUEST: media busy!\n", + __func__); /* irlap->log.condition = MEDIA_BUSY; */ /* This will make IrLMP try again */ @@ -442,7 +440,8 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event, * those cases... * Jean II */ - IRDA_DEBUG(1, "%s(), Receiving final discovery request, missed the discovery slots :-(\n", __func__); + pr_debug("%s(), Receiving final discovery request, missed the discovery slots :-(\n", + __func__); /* Last discovery request -> in the log */ irlap_discovery_indication(self, info->discovery); @@ -520,8 +519,8 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event, case RECV_UI_FRAME: /* Only accept broadcast frames in NDM mode */ if (info->caddr != CBROADCAST) { - IRDA_DEBUG(0, "%s(), not a broadcast frame!\n", - __func__); + pr_debug("%s(), not a broadcast frame!\n", + __func__); } else irlap_unitdata_indication(self, skb); break; @@ -537,11 +536,11 @@ static int irlap_state_ndm(struct irlap_cb *self, IRLAP_EVENT event, irlap_send_test_frame(self, CBROADCAST, info->daddr, skb); break; case RECV_TEST_RSP: - IRDA_DEBUG(0, "%s() not implemented!\n", __func__); + pr_debug("%s() not implemented!\n", __func__); break; default: - IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__, - irlap_event[event]); + pr_debug("%s(), Unknown event %s\n", __func__, + irlap_event[event]); ret = -1; break; @@ -568,13 +567,12 @@ static int irlap_state_query(struct irlap_cb *self, IRLAP_EVENT event, IRDA_ASSERT(info != NULL, return -1;); IRDA_ASSERT(info->discovery != NULL, return -1;); - IRDA_DEBUG(4, "%s(), daddr=%08x\n", __func__, - info->discovery->data.daddr); + pr_debug("%s(), daddr=%08x\n", __func__, + info->discovery->data.daddr); if (!self->discovery_log) { - IRDA_WARNING("%s: discovery log is gone! " - "maybe the discovery timeout has been set" - " too short?\n", __func__); + net_warn_ratelimited("%s: discovery log is gone! 
maybe the discovery timeout has been set too short?\n", + __func__); break; } hashbin_insert(self->discovery_log, @@ -599,7 +597,8 @@ static int irlap_state_query(struct irlap_cb *self, IRLAP_EVENT event, IRDA_ASSERT(info != NULL, return -1;); - IRDA_DEBUG(1, "%s(), Receiving discovery request (s = %d) while performing discovery :-(\n", __func__, info->s); + pr_debug("%s(), Receiving discovery request (s = %d) while performing discovery :-(\n", + __func__, info->s); /* Last discovery request ? */ if (info->s == 0xff) @@ -613,8 +612,8 @@ static int irlap_state_query(struct irlap_cb *self, IRLAP_EVENT event, * timing requirements. */ if (irda_device_is_receiving(self->netdev) && !self->add_wait) { - IRDA_DEBUG(2, "%s(), device is slow to answer, " - "waiting some more!\n", __func__); + pr_debug("%s(), device is slow to answer, waiting some more!\n", + __func__); irlap_start_slot_timer(self, msecs_to_jiffies(10)); self->add_wait = TRUE; return ret; @@ -650,8 +649,8 @@ static int irlap_state_query(struct irlap_cb *self, IRLAP_EVENT event, } break; default: - IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__, - irlap_event[event]); + pr_debug("%s(), Unknown event %s\n", __func__, + irlap_event[event]); ret = -1; break; @@ -672,15 +671,13 @@ static int irlap_state_reply(struct irlap_cb *self, IRLAP_EVENT event, discovery_t *discovery_rsp; int ret=0; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); switch (event) { case QUERY_TIMER_EXPIRED: - IRDA_DEBUG(0, "%s(), QUERY_TIMER_EXPIRED <%ld>\n", - __func__, jiffies); + pr_debug("%s(), QUERY_TIMER_EXPIRED <%ld>\n", + __func__, jiffies); irlap_next_state(self, LAP_NDM); break; case RECV_DISCOVERY_XID_CMD: @@ -718,8 +715,8 @@ static int irlap_state_reply(struct irlap_cb *self, IRLAP_EVENT event, } break; default: - IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __func__, - event, irlap_event[event]); + pr_debug("%s(), Unknown event %d, %s\n", __func__, + event, irlap_event[event]); ret = -1; break; @@ -739,7 +736,7 @@ static int irlap_state_conn(struct irlap_cb *self, IRLAP_EVENT event, { int ret = 0; - IRDA_DEBUG(4, "%s(), event=%s\n", __func__, irlap_event[ event]); + pr_debug("%s(), event=%s\n", __func__, irlap_event[event]); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); @@ -799,20 +796,20 @@ static int irlap_state_conn(struct irlap_cb *self, IRLAP_EVENT event, break; case RECV_DISCOVERY_XID_CMD: - IRDA_DEBUG(3, "%s(), event RECV_DISCOVER_XID_CMD!\n", - __func__); + pr_debug("%s(), event RECV_DISCOVER_XID_CMD!\n", + __func__); irlap_next_state(self, LAP_NDM); break; case DISCONNECT_REQUEST: - IRDA_DEBUG(0, "%s(), Disconnect request!\n", __func__); + pr_debug("%s(), Disconnect request!\n", __func__); irlap_send_dm_frame(self); irlap_next_state( self, LAP_NDM); irlap_disconnect_indication(self, LAP_DISC_INDICATION); break; default: - IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __func__, - event, irlap_event[event]); + pr_debug("%s(), Unknown event %d, %s\n", __func__, + event, irlap_event[event]); ret = -1; break; @@ -833,8 +830,6 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event, { int ret = 0; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); @@ -862,7 +857,7 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event, self->retry_count++; break; case RECV_SNRM_CMD: - IRDA_DEBUG(4, "%s(), SNRM battle!\n", 
__func__); + pr_debug("%s(), SNRM battle!\n", __func__); IRDA_ASSERT(skb != NULL, return 0;); IRDA_ASSERT(info != NULL, return 0;); @@ -949,8 +944,8 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event, irlap_disconnect_indication(self, LAP_DISC_INDICATION); break; default: - IRDA_DEBUG(1, "%s(), Unknown event %d, %s\n", __func__, - event, irlap_event[event]); + pr_debug("%s(), Unknown event %d, %s\n", __func__, + event, irlap_event[event]); ret = -1; break; @@ -967,7 +962,7 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event, static int irlap_state_offline(struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info) { - IRDA_DEBUG( 0, "%s(), Unknown event\n", __func__); + pr_debug("%s(), Unknown event\n", __func__); return -1; } @@ -1030,8 +1025,8 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event, * speed and turn-around-time. */ if((!nextfit) && (skb->len > self->bytes_left)) { - IRDA_DEBUG(0, "%s(), Not allowed to transmit" - " more bytes!\n", __func__); + pr_debug("%s(), Not allowed to transmit more bytes!\n", + __func__); /* Requeue the skb */ skb_queue_head(&self->txq, skb_get(skb)); /* @@ -1082,8 +1077,8 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event, self->fast_RR = FALSE; #endif /* CONFIG_IRDA_FAST_RR */ } else { - IRDA_DEBUG(4, "%s(), Unable to send! remote busy?\n", - __func__); + pr_debug("%s(), Unable to send! remote busy?\n", + __func__); skb_queue_head(&self->txq, skb_get(skb)); /* @@ -1094,8 +1089,8 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event, } break; case POLL_TIMER_EXPIRED: - IRDA_DEBUG(3, "%s(), POLL_TIMER_EXPIRED <%ld>\n", - __func__, jiffies); + pr_debug("%s(), POLL_TIMER_EXPIRED <%ld>\n", + __func__, jiffies); irlap_send_rr_frame(self, CMD_FRAME); /* Return to NRM properly - Jean II */ self->window = self->window_size; @@ -1120,8 +1115,8 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event, * when we return... 
- Jean II */ break; default: - IRDA_DEBUG(0, "%s(), Unknown event %s\n", - __func__, irlap_event[event]); + pr_debug("%s(), Unknown event %s\n", + __func__, irlap_event[event]); ret = -EINVAL; break; @@ -1139,8 +1134,6 @@ static int irlap_state_pclose(struct irlap_cb *self, IRLAP_EVENT event, { int ret = 0; - IRDA_DEBUG(1, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); @@ -1174,7 +1167,7 @@ static int irlap_state_pclose(struct irlap_cb *self, IRLAP_EVENT event, } break; default: - IRDA_DEBUG(1, "%s(), Unknown event %d\n", __func__, event); + pr_debug("%s(), Unknown event %d\n", __func__, event); ret = -1; break; @@ -1296,9 +1289,8 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, /* Keep state */ irlap_next_state(self, LAP_NRM_P); } else { - IRDA_DEBUG(4, - "%s(), missing or duplicate frame!\n", - __func__); + pr_debug("%s(), missing or duplicate frame!\n", + __func__); /* Update Nr received */ irlap_update_nr_received(self, info->nr); @@ -1367,8 +1359,8 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, if ((ns_status == NS_UNEXPECTED) && (nr_status == NR_UNEXPECTED)) { - IRDA_DEBUG(4, "%s(), unexpected nr and ns!\n", - __func__); + pr_debug("%s(), unexpected nr and ns!\n", + __func__); if (info->pf) { /* Resend rejected frames */ irlap_resend_rejected_frames(self, CMD_FRAME); @@ -1408,9 +1400,9 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, } break; } - IRDA_DEBUG(1, "%s(), Not implemented!\n", __func__); - IRDA_DEBUG(1, "%s(), event=%s, ns_status=%d, nr_status=%d\n", - __func__, irlap_event[event], ns_status, nr_status); + pr_debug("%s(), Not implemented!\n", __func__); + pr_debug("%s(), event=%s, ns_status=%d, nr_status=%d\n", + __func__, irlap_event[event], ns_status, nr_status); break; case RECV_UI_FRAME: /* Poll bit cleared? */ @@ -1421,7 +1413,8 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, del_timer(&self->final_timer); irlap_data_indication(self, skb, TRUE); irlap_next_state(self, LAP_XMIT_P); - IRDA_DEBUG(1, "%s: RECV_UI_FRAME: next state %s\n", __func__, irlap_state[self->state]); + pr_debug("%s: RECV_UI_FRAME: next state %s\n", + __func__, irlap_state[self->state]); irlap_start_poll_timer(self, self->poll_timeout); } break; @@ -1464,10 +1457,9 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, /* Update Nr received */ irlap_update_nr_received(self, info->nr); - IRDA_DEBUG(4, "RECV_RR_FRAME: Retrans:%d, nr=%d, va=%d, " - "vs=%d, vr=%d\n", - self->retry_count, info->nr, self->va, - self->vs, self->vr); + pr_debug("RECV_RR_FRAME: Retrans:%d, nr=%d, va=%d, vs=%d, vr=%d\n", + self->retry_count, info->nr, self->va, + self->vs, self->vr); /* Resend rejected frames */ irlap_resend_rejected_frames(self, CMD_FRAME); @@ -1475,8 +1467,8 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, irlap_next_state(self, LAP_NRM_P); } else if (ret == NR_INVALID) { - IRDA_DEBUG(1, "%s(), Received RR with " - "invalid nr !\n", __func__); + pr_debug("%s(), Received RR with invalid nr !\n", + __func__); irlap_next_state(self, LAP_RESET_WAIT); @@ -1512,8 +1504,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, * we only do this once for each frame. */ if (irda_device_is_receiving(self->netdev) && !self->add_wait) { - IRDA_DEBUG(1, "FINAL_TIMER_EXPIRED when receiving a " - "frame! Waiting a little bit more!\n"); + pr_debug("FINAL_TIMER_EXPIRED when receiving a frame! 
Waiting a little bit more!\n"); irlap_start_final_timer(self, msecs_to_jiffies(300)); /* @@ -1530,18 +1521,18 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, if (self->retry_count < self->N2) { if (skb_peek(&self->wx_list) == NULL) { /* Retry sending the pf bit to the secondary */ - IRDA_DEBUG(4, "nrm_p: resending rr"); + pr_debug("nrm_p: resending rr"); irlap_wait_min_turn_around(self, &self->qos_tx); irlap_send_rr_frame(self, CMD_FRAME); } else { - IRDA_DEBUG(4, "nrm_p: resend frames"); + pr_debug("nrm_p: resend frames"); irlap_resend_rejected_frames(self, CMD_FRAME); } irlap_start_final_timer(self, self->final_timeout); self->retry_count++; - IRDA_DEBUG(4, "irlap_state_nrm_p: FINAL_TIMER_EXPIRED:" - " retry_count=%d\n", self->retry_count); + pr_debug("irlap_state_nrm_p: FINAL_TIMER_EXPIRED: retry_count=%d\n", + self->retry_count); /* Early warning event. I'm using a pretty liberal * interpretation of the spec and generate an event @@ -1581,7 +1572,7 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, irlap_start_final_timer(self, 2 * self->final_timeout); break; case RECV_RD_RSP: - IRDA_DEBUG(1, "%s(), RECV_RD_RSP\n", __func__); + pr_debug("%s(), RECV_RD_RSP\n", __func__); irlap_flush_all_queues(self); irlap_next_state(self, LAP_XMIT_P); @@ -1589,8 +1580,8 @@ static int irlap_state_nrm_p(struct irlap_cb *self, IRLAP_EVENT event, irlap_disconnect_request(self); break; default: - IRDA_DEBUG(1, "%s(), Unknown event %s\n", - __func__, irlap_event[event]); + pr_debug("%s(), Unknown event %s\n", + __func__, irlap_event[event]); ret = -1; break; @@ -1610,7 +1601,7 @@ static int irlap_state_reset_wait(struct irlap_cb *self, IRLAP_EVENT event, { int ret = 0; - IRDA_DEBUG(3, "%s(), event = %s\n", __func__, irlap_event[event]); + pr_debug("%s(), event = %s\n", __func__, irlap_event[event]); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); @@ -1636,8 +1627,8 @@ static int irlap_state_reset_wait(struct irlap_cb *self, IRLAP_EVENT event, irlap_next_state( self, LAP_PCLOSE); break; default: - IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__, - irlap_event[event]); + pr_debug("%s(), Unknown event %s\n", __func__, + irlap_event[event]); ret = -1; break; @@ -1657,7 +1648,7 @@ static int irlap_state_reset(struct irlap_cb *self, IRLAP_EVENT event, { int ret = 0; - IRDA_DEBUG(3, "%s(), event = %s\n", __func__, irlap_event[event]); + pr_debug("%s(), event = %s\n", __func__, irlap_event[event]); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); @@ -1715,7 +1706,7 @@ static int irlap_state_reset(struct irlap_cb *self, IRLAP_EVENT event, * state */ if (!info) { - IRDA_DEBUG(3, "%s(), RECV_SNRM_CMD\n", __func__); + pr_debug("%s(), RECV_SNRM_CMD\n", __func__); irlap_initiate_connection_state(self); irlap_wait_min_turn_around(self, &self->qos_tx); irlap_send_ua_response_frame(self, &self->qos_rx); @@ -1723,14 +1714,13 @@ static int irlap_state_reset(struct irlap_cb *self, IRLAP_EVENT event, irlap_start_wd_timer(self, self->wd_timeout); irlap_next_state(self, LAP_NDM); } else { - IRDA_DEBUG(0, - "%s(), SNRM frame contained an I field!\n", - __func__); + pr_debug("%s(), SNRM frame contained an I field!\n", + __func__); } break; default: - IRDA_DEBUG(1, "%s(), Unknown event %s\n", - __func__, irlap_event[event]); + pr_debug("%s(), Unknown event %s\n", + __func__, irlap_event[event]); ret = -1; break; @@ -1750,7 +1740,7 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT 
event, { int ret = 0; - IRDA_DEBUG(4, "%s(), event=%s\n", __func__, irlap_event[event]); + pr_debug("%s(), event=%s\n", __func__, irlap_event[event]); IRDA_ASSERT(self != NULL, return -ENODEV;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;); @@ -1786,8 +1776,8 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event, * speed and turn-around-time. */ if((!nextfit) && (skb->len > self->bytes_left)) { - IRDA_DEBUG(0, "%s(), Not allowed to transmit" - " more bytes!\n", __func__); + pr_debug("%s(), Not allowed to transmit more bytes!\n", + __func__); /* Requeue the skb */ skb_queue_head(&self->txq, skb_get(skb)); @@ -1833,7 +1823,7 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event, ret = -EPROTO; } } else { - IRDA_DEBUG(2, "%s(), Unable to send!\n", __func__); + pr_debug("%s(), Unable to send!\n", __func__); skb_queue_head(&self->txq, skb_get(skb)); ret = -EPROTO; } @@ -1849,8 +1839,8 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event, * when we return... - Jean II */ break; default: - IRDA_DEBUG(2, "%s(), Unknown event %s\n", __func__, - irlap_event[event]); + pr_debug("%s(), Unknown event %s\n", __func__, + irlap_event[event]); ret = -EINVAL; break; @@ -1872,7 +1862,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, int nr_status; int ret = 0; - IRDA_DEBUG(4, "%s(), event=%s\n", __func__, irlap_event[ event]); + pr_debug("%s(), event=%s\n", __func__, irlap_event[event]); IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -1;); @@ -1880,10 +1870,9 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, switch (event) { case RECV_I_CMD: /* Optimize for the common case */ /* FIXME: must check for remote_busy below */ - IRDA_DEBUG(4, "%s(), event=%s nr=%d, vs=%d, ns=%d, " - "vr=%d, pf=%d\n", __func__, - irlap_event[event], info->nr, - self->vs, info->ns, self->vr, info->pf); + pr_debug("%s(), event=%s nr=%d, vs=%d, ns=%d, vr=%d, pf=%d\n", + __func__, irlap_event[event], info->nr, + self->vs, info->ns, self->vr, info->pf); self->retry_count = 0; @@ -1983,7 +1972,7 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, if ((ns_status == NS_EXPECTED) && (nr_status == NR_UNEXPECTED)) { if (info->pf) { - IRDA_DEBUG(4, "RECV_I_RSP: frame(s) lost\n"); + pr_debug("RECV_I_RSP: frame(s) lost\n"); self->vr = (self->vr + 1) % 8; @@ -2020,10 +2009,10 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, } if (ret == NR_INVALID) { - IRDA_DEBUG(0, "NRM_S, NR_INVALID not implemented!\n"); + pr_debug("NRM_S, NR_INVALID not implemented!\n"); } if (ret == NS_INVALID) { - IRDA_DEBUG(0, "NRM_S, NS_INVALID not implemented!\n"); + pr_debug("NRM_S, NS_INVALID not implemented!\n"); } break; case RECV_UI_FRAME: @@ -2112,22 +2101,21 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, /* Keep state */ irlap_next_state(self, LAP_NRM_S); } else { - IRDA_DEBUG(1, "%s(), invalid nr not implemented!\n", - __func__); + pr_debug("%s(), invalid nr not implemented!\n", + __func__); } break; case RECV_SNRM_CMD: /* SNRM frame is not allowed to contain an I-field */ if (!info) { del_timer(&self->wd_timer); - IRDA_DEBUG(1, "%s(), received SNRM cmd\n", __func__); + pr_debug("%s(), received SNRM cmd\n", __func__); irlap_next_state(self, LAP_RESET_CHECK); irlap_reset_indication(self); } else { - IRDA_DEBUG(0, - "%s(), SNRM frame contained an I-field!\n", - __func__); + pr_debug("%s(), SNRM frame contained an I-field!\n", + __func__); 
} break; @@ -2159,8 +2147,8 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, * which explain why we use (self->N2 / 2) here !!! * Jean II */ - IRDA_DEBUG(1, "%s(), retry_count = %d\n", __func__, - self->retry_count); + pr_debug("%s(), retry_count = %d\n", __func__, + self->retry_count); if (self->retry_count < (self->N2 / 2)) { /* No retry, just wait for primary */ @@ -2212,8 +2200,8 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, irlap_send_test_frame(self, self->caddr, info->daddr, skb); break; default: - IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __func__, - event, irlap_event[event]); + pr_debug("%s(), Unknown event %d, (%s)\n", __func__, + event, irlap_event[event]); ret = -EINVAL; break; @@ -2227,8 +2215,6 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event, static int irlap_state_sclose(struct irlap_cb *self, IRLAP_EVENT event, struct sk_buff *skb, struct irlap_info *info) { - IRDA_DEBUG(1, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return -ENODEV;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;); @@ -2284,8 +2270,8 @@ static int irlap_state_sclose(struct irlap_cb *self, IRLAP_EVENT event, break; /* stay in SCLOSE */ } - IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __func__, - event, irlap_event[event]); + pr_debug("%s(), Unknown event %d, (%s)\n", __func__, + event, irlap_event[event]); break; } @@ -2299,7 +2285,7 @@ static int irlap_state_reset_check( struct irlap_cb *self, IRLAP_EVENT event, { int ret = 0; - IRDA_DEBUG(1, "%s(), event=%s\n", __func__, irlap_event[event]); + pr_debug("%s(), event=%s\n", __func__, irlap_event[event]); IRDA_ASSERT(self != NULL, return -ENODEV;); IRDA_ASSERT(self->magic == LAP_MAGIC, return -EBADR;); @@ -2320,8 +2306,8 @@ static int irlap_state_reset_check( struct irlap_cb *self, IRLAP_EVENT event, irlap_next_state(self, LAP_SCLOSE); break; default: - IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __func__, - event, irlap_event[event]); + pr_debug("%s(), Unknown event %d, (%s)\n", __func__, + event, irlap_event[event]); ret = -EINVAL; break; diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c index a37998c6273d..b936b1251a66 100644 --- a/net/irda/irlap_frame.c +++ b/net/irda/irlap_frame.c @@ -103,8 +103,8 @@ void irlap_queue_xmit(struct irlap_cb *self, struct sk_buff *skb) irlap_insert_info(self, skb); if (unlikely(self->mode & IRDA_MODE_MONITOR)) { - IRDA_DEBUG(3, "%s(): %s is in monitor mode\n", __func__, - self->netdev->name); + pr_debug("%s(): %s is in monitor mode\n", __func__, + self->netdev->name); dev_kfree_skb(skb); return; } @@ -182,8 +182,8 @@ static void irlap_recv_snrm_cmd(struct irlap_cb *self, struct sk_buff *skb, /* Check if the new connection address is valid */ if ((info->caddr == 0x00) || (info->caddr == 0xfe)) { - IRDA_DEBUG(3, "%s(), invalid connection address!\n", - __func__); + pr_debug("%s(), invalid connection address!\n", + __func__); return; } @@ -193,8 +193,8 @@ static void irlap_recv_snrm_cmd(struct irlap_cb *self, struct sk_buff *skb, /* Only accept if addressed directly to us */ if (info->saddr != self->saddr) { - IRDA_DEBUG(2, "%s(), not addressed to us!\n", - __func__); + pr_debug("%s(), not addressed to us!\n", + __func__); return; } irlap_do_event(self, RECV_SNRM_CMD, skb, info); @@ -216,7 +216,7 @@ void irlap_send_ua_response_frame(struct irlap_cb *self, struct qos_info *qos) struct ua_frame *frame; int ret; - IRDA_DEBUG(2, "%s() <%ld>\n", __func__, jiffies); + pr_debug("%s() <%ld>\n", __func__, jiffies); 
IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -291,8 +291,6 @@ void irlap_send_disc_frame(struct irlap_cb *self) struct sk_buff *tx_skb = NULL; struct disc_frame *frame; - IRDA_DEBUG(3, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -322,8 +320,8 @@ void irlap_send_discovery_xid_frame(struct irlap_cb *self, int S, __u8 s, __u32 bcast = BROADCAST; __u8 *info; - IRDA_DEBUG(4, "%s(), s=%d, S=%d, command=%d\n", __func__, - s, S, command); + pr_debug("%s(), s=%d, S=%d, command=%d\n", __func__, + s, S, command); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); @@ -415,13 +413,11 @@ static void irlap_recv_discovery_xid_rsp(struct irlap_cb *self, __u8 *discovery_info; char *text; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); if (!pskb_may_pull(skb, sizeof(struct xid_frame))) { - IRDA_ERROR("%s: frame too short!\n", __func__); + net_err_ratelimited("%s: frame too short!\n", __func__); return; } @@ -432,13 +428,13 @@ static void irlap_recv_discovery_xid_rsp(struct irlap_cb *self, /* Make sure frame is addressed to us */ if ((info->saddr != self->saddr) && (info->saddr != BROADCAST)) { - IRDA_DEBUG(0, "%s(), frame is not addressed to us!\n", - __func__); + pr_debug("%s(), frame is not addressed to us!\n", + __func__); return; } if ((discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC)) == NULL) { - IRDA_WARNING("%s: kmalloc failed!\n", __func__); + net_warn_ratelimited("%s: kmalloc failed!\n", __func__); return; } @@ -446,15 +442,15 @@ static void irlap_recv_discovery_xid_rsp(struct irlap_cb *self, discovery->data.saddr = self->saddr; discovery->timestamp = jiffies; - IRDA_DEBUG(4, "%s(), daddr=%08x\n", __func__, - discovery->data.daddr); + pr_debug("%s(), daddr=%08x\n", __func__, + discovery->data.daddr); discovery_info = skb_pull(skb, sizeof(struct xid_frame)); /* Get info returned from peer */ discovery->data.hints[0] = discovery_info[0]; if (discovery_info[0] & HINT_EXTENSION) { - IRDA_DEBUG(4, "EXTENSION\n"); + pr_debug("EXTENSION\n"); discovery->data.hints[1] = discovery_info[1]; discovery->data.charset = discovery_info[2]; text = (char *) &discovery_info[3]; @@ -492,7 +488,7 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self, char *text; if (!pskb_may_pull(skb, sizeof(struct xid_frame))) { - IRDA_ERROR("%s: frame too short!\n", __func__); + net_err_ratelimited("%s: frame too short!\n", __func__); return; } @@ -503,8 +499,8 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self, /* Make sure frame is addressed to us */ if ((info->saddr != self->saddr) && (info->saddr != BROADCAST)) { - IRDA_DEBUG(0, "%s(), frame is not addressed to us!\n", - __func__); + pr_debug("%s(), frame is not addressed to us!\n", + __func__); return; } @@ -536,8 +532,8 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self, /* Check if things are sane at this point... */ if((discovery_info == NULL) || !pskb_may_pull(skb, 3)) { - IRDA_ERROR("%s: discovery frame too short!\n", - __func__); + net_err_ratelimited("%s: discovery frame too short!\n", + __func__); return; } @@ -545,10 +541,8 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self, * We now have some discovery info to deliver! 
*/ discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC); - if (!discovery) { - IRDA_WARNING("%s: unable to malloc!\n", __func__); + if (!discovery) return; - } discovery->data.daddr = info->daddr; discovery->data.saddr = self->saddr; @@ -658,7 +652,7 @@ static void irlap_recv_rnr_frame(struct irlap_cb *self, struct sk_buff *skb, { info->nr = skb->data[1] >> 5; - IRDA_DEBUG(4, "%s(), nr=%d, %ld\n", __func__, info->nr, jiffies); + pr_debug("%s(), nr=%d, %ld\n", __func__, info->nr, jiffies); if (command) irlap_do_event(self, RECV_RNR_CMD, skb, info); @@ -669,8 +663,6 @@ static void irlap_recv_rnr_frame(struct irlap_cb *self, struct sk_buff *skb, static void irlap_recv_rej_frame(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info, int command) { - IRDA_DEBUG(0, "%s()\n", __func__); - info->nr = skb->data[1] >> 5; /* Check if this is a command or a response frame */ @@ -683,8 +675,6 @@ static void irlap_recv_rej_frame(struct irlap_cb *self, struct sk_buff *skb, static void irlap_recv_srej_frame(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info, int command) { - IRDA_DEBUG(0, "%s()\n", __func__); - info->nr = skb->data[1] >> 5; /* Check if this is a command or a response frame */ @@ -697,8 +687,6 @@ static void irlap_recv_srej_frame(struct irlap_cb *self, struct sk_buff *skb, static void irlap_recv_disc_frame(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info, int command) { - IRDA_DEBUG(2, "%s()\n", __func__); - /* Check if this is a command or a response frame */ if (command) irlap_do_event(self, RECV_DISC_CMD, skb, info); @@ -756,7 +744,7 @@ void irlap_send_data_primary(struct irlap_cb *self, struct sk_buff *skb) irlap_send_i_frame( self, tx_skb, CMD_FRAME); } else { - IRDA_DEBUG(4, "%s(), sending unreliable frame\n", __func__); + pr_debug("%s(), sending unreliable frame\n", __func__); irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME); self->window -= 1; } @@ -809,7 +797,7 @@ void irlap_send_data_primary_poll(struct irlap_cb *self, struct sk_buff *skb) irlap_next_state(self, LAP_NRM_P); irlap_send_i_frame(self, tx_skb, CMD_FRAME); } else { - IRDA_DEBUG(4, "%s(), sending unreliable frame\n", __func__); + pr_debug("%s(), sending unreliable frame\n", __func__); if (self->ack_required) { irlap_send_ui_frame(self, skb_get(skb), self->caddr, CMD_FRAME); @@ -836,7 +824,9 @@ void irlap_send_data_primary_poll(struct irlap_cb *self, struct sk_buff *skb) * See max_line_capacities[][] in qos.c for details. Jean II */ transmission_time -= (self->final_timeout * self->bytes_left / self->line_capacity); - IRDA_DEBUG(4, "%s() adjusting transmission_time : ft=%d, bl=%d, lc=%d -> tt=%d\n", __func__, self->final_timeout, self->bytes_left, self->line_capacity, transmission_time); + pr_debug("%s() adjusting transmission_time : ft=%d, bl=%d, lc=%d -> tt=%d\n", + __func__, self->final_timeout, self->bytes_left, + self->line_capacity, transmission_time); /* We are allowed to transmit a maximum number of bytes again. 
*/ self->bytes_left = self->line_capacity; @@ -997,7 +987,7 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command) /* tx_skb = skb_clone( skb, GFP_ATOMIC); */ tx_skb = skb_copy(skb, GFP_ATOMIC); if (!tx_skb) { - IRDA_DEBUG(0, "%s(), unable to copy\n", __func__); + pr_debug("%s(), unable to copy\n", __func__); return; } @@ -1020,7 +1010,7 @@ void irlap_resend_rejected_frames(struct irlap_cb *self, int command) */ while (!skb_queue_empty(&self->txq)) { - IRDA_DEBUG(0, "%s(), sending additional frames!\n", __func__); + pr_debug("%s(), sending additional frames!\n", __func__); if (self->window > 0) { skb = skb_dequeue( &self->txq); IRDA_ASSERT(skb != NULL, return;); @@ -1060,7 +1050,7 @@ void irlap_resend_rejected_frame(struct irlap_cb *self, int command) /* tx_skb = skb_clone( skb, GFP_ATOMIC); */ tx_skb = skb_copy(skb, GFP_ATOMIC); if (!tx_skb) { - IRDA_DEBUG(0, "%s(), unable to copy\n", __func__); + pr_debug("%s(), unable to copy\n", __func__); return; } @@ -1083,8 +1073,6 @@ void irlap_resend_rejected_frame(struct irlap_cb *self, int command) void irlap_send_ui_frame(struct irlap_cb *self, struct sk_buff *skb, __u8 caddr, int command) { - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); @@ -1143,8 +1131,6 @@ static inline void irlap_recv_i_frame(struct irlap_cb *self, static void irlap_recv_ui_frame(struct irlap_cb *self, struct sk_buff *skb, struct irlap_info *info) { - IRDA_DEBUG( 4, "%s()\n", __func__); - info->pf = skb->data[1] & PF_BIT; /* Final bit */ irlap_do_event(self, RECV_UI_FRAME, skb, info); @@ -1162,15 +1148,13 @@ static void irlap_recv_frmr_frame(struct irlap_cb *self, struct sk_buff *skb, __u8 *frame; int w, x, y, z; - IRDA_DEBUG(0, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LAP_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); IRDA_ASSERT(info != NULL, return;); if (!pskb_may_pull(skb, 4)) { - IRDA_ERROR("%s: frame too short!\n", __func__); + net_err_ratelimited("%s: frame too short!\n", __func__); return; } @@ -1186,21 +1170,16 @@ static void irlap_recv_frmr_frame(struct irlap_cb *self, struct sk_buff *skb, z = frame[3] & 0x08; if (w) { - IRDA_DEBUG(0, "Rejected control field is undefined or not " - "implemented.\n"); + pr_debug("Rejected control field is undefined or not implemented\n"); } if (x) { - IRDA_DEBUG(0, "Rejected control field was invalid because it " - "contained a non permitted I field.\n"); + pr_debug("Rejected control field was invalid because it contained a non permitted I field\n"); } if (y) { - IRDA_DEBUG(0, "Received I field exceeded the maximum negotiated " - "for the existing connection or exceeded the maximum " - "this station supports if no connection exists.\n"); + pr_debug("Received I field exceeded the maximum negotiated for the existing connection or exceeded the maximum this station supports if no connection exists\n"); } if (z) { - IRDA_DEBUG(0, "Rejected control field control field contained an " - "invalid Nr count.\n"); + pr_debug("Rejected control field control field contained an invalid Nr count\n"); } irlap_do_event(self, RECV_FRMR_RSP, skb, info); } @@ -1256,10 +1235,8 @@ static void irlap_recv_test_frame(struct irlap_cb *self, struct sk_buff *skb, { struct test_frame *frame; - IRDA_DEBUG(2, "%s()\n", __func__); - if (!pskb_may_pull(skb, sizeof(*frame))) { - IRDA_ERROR("%s: frame too short!\n", __func__); + net_err_ratelimited("%s: frame too short!\n", __func__); 
return; } frame = (struct test_frame *) skb->data; @@ -1267,8 +1244,8 @@ static void irlap_recv_test_frame(struct irlap_cb *self, struct sk_buff *skb, /* Broadcast frames must carry saddr and daddr fields */ if (info->caddr == CBROADCAST) { if (skb->len < sizeof(struct test_frame)) { - IRDA_DEBUG(0, "%s() test frame too short!\n", - __func__); + pr_debug("%s() test frame too short!\n", + __func__); return; } @@ -1328,13 +1305,13 @@ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev, * share and non linear skbs. This should never happen, so * we don't need to be clever about it. Jean II */ if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { - IRDA_ERROR("%s: can't clone shared skb!\n", __func__); + net_err_ratelimited("%s: can't clone shared skb!\n", __func__); goto err; } /* Check if frame is large enough for parsing */ if (!pskb_may_pull(skb, 2)) { - IRDA_ERROR("%s: frame too short!\n", __func__); + net_err_ratelimited("%s: frame too short!\n", __func__); goto err; } @@ -1348,8 +1325,8 @@ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev, /* First we check if this frame has a valid connection address */ if ((info.caddr != self->caddr) && (info.caddr != CBROADCAST)) { - IRDA_DEBUG(0, "%s(), wrong connection address!\n", - __func__); + pr_debug("%s(), wrong connection address!\n", + __func__); goto out; } /* @@ -1383,8 +1360,8 @@ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev, irlap_recv_srej_frame(self, skb, &info, command); break; default: - IRDA_WARNING("%s: Unknown S-frame %02x received!\n", - __func__, info.control); + net_warn_ratelimited("%s: Unknown S-frame %02x received!\n", + __func__, info.control); break; } goto out; @@ -1421,8 +1398,8 @@ int irlap_driver_rcv(struct sk_buff *skb, struct net_device *dev, irlap_recv_ui_frame(self, skb, &info); break; default: - IRDA_WARNING("%s: Unknown frame %02x received!\n", - __func__, info.control); + net_warn_ratelimited("%s: Unknown frame %02x received!\n", + __func__, info.control); break; } out: diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c index a5f28d421ea8..a26c401ef4a4 100644 --- a/net/irda/irlmp.c +++ b/net/irda/irlmp.c @@ -83,7 +83,6 @@ const char *irlmp_reason_str(LM_REASON reason) */ int __init irlmp_init(void) { - IRDA_DEBUG(1, "%s()\n", __func__); /* Initialize the irlmp structure. 
*/ irlmp = kzalloc( sizeof(struct irlmp_cb), GFP_KERNEL); if (irlmp == NULL) @@ -170,10 +169,8 @@ struct lsap_cb *irlmp_open_lsap(__u8 slsap_sel, notify_t *notify, __u8 pid) /* Allocate new instance of a LSAP connection */ self = kzalloc(sizeof(struct lsap_cb), GFP_ATOMIC); - if (self == NULL) { - IRDA_ERROR("%s: can't allocate memory\n", __func__); + if (self == NULL) return NULL; - } self->magic = LMP_LSAP_MAGIC; self->slsap_sel = slsap_sel; @@ -209,8 +206,6 @@ EXPORT_SYMBOL(irlmp_open_lsap); */ static void __irlmp_close_lsap(struct lsap_cb *self) { - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); @@ -269,9 +264,8 @@ void irlmp_close_lsap(struct lsap_cb *self) NULL); } if (!lsap) { - IRDA_DEBUG(0, - "%s(), Looks like somebody has removed me already!\n", - __func__); + pr_debug("%s(), Looks like somebody has removed me already!\n", + __func__); return; } __irlmp_close_lsap(self); @@ -297,10 +291,8 @@ void irlmp_register_link(struct irlap_cb *irlap, __u32 saddr, notify_t *notify) * Allocate new instance of a LSAP connection */ lap = kzalloc(sizeof(struct lap_cb), GFP_KERNEL); - if (lap == NULL) { - IRDA_ERROR("%s: unable to kmalloc\n", __func__); + if (lap == NULL) return; - } lap->irlap = irlap; lap->magic = LMP_LAP_MAGIC; @@ -311,7 +303,8 @@ void irlmp_register_link(struct irlap_cb *irlap, __u32 saddr, notify_t *notify) #endif lap->lsaps = hashbin_new(HB_LOCK); if (lap->lsaps == NULL) { - IRDA_WARNING("%s(), unable to kmalloc lsaps\n", __func__); + net_warn_ratelimited("%s(), unable to kmalloc lsaps\n", + __func__); kfree(lap); return; } @@ -343,8 +336,6 @@ void irlmp_unregister_link(__u32 saddr) { struct lap_cb *link; - IRDA_DEBUG(4, "%s()\n", __func__); - /* We must remove ourselves from the hashbin *first*. This ensure * that no more LSAPs will be open on this link and no discovery * will be triggered anymore. Jean II */ @@ -386,9 +377,8 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel, IRDA_ASSERT(self != NULL, return -EBADR;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -EBADR;); - IRDA_DEBUG(2, - "%s(), slsap_sel=%02x, dlsap_sel=%02x, saddr=%08x, daddr=%08x\n", - __func__, self->slsap_sel, dlsap_sel, saddr, daddr); + pr_debug("%s(), slsap_sel=%02x, dlsap_sel=%02x, saddr=%08x, daddr=%08x\n", + __func__, self->slsap_sel, dlsap_sel, saddr, daddr); if (test_bit(0, &self->connected)) { ret = -EISCONN; @@ -432,7 +422,7 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel, if (daddr != DEV_ADDR_ANY) discovery = hashbin_find(irlmp->cachelog, daddr, NULL); else { - IRDA_DEBUG(2, "%s(), no daddr\n", __func__); + pr_debug("%s(), no daddr\n", __func__); discovery = (discovery_t *) hashbin_get_first(irlmp->cachelog); } @@ -445,7 +435,7 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel, } lap = hashbin_lock_find(irlmp->links, saddr, NULL); if (lap == NULL) { - IRDA_DEBUG(1, "%s(), Unable to find a usable link!\n", __func__); + pr_debug("%s(), Unable to find a usable link!\n", __func__); ret = -EHOSTUNREACH; goto err; } @@ -460,14 +450,15 @@ int irlmp_connect_request(struct lsap_cb *self, __u8 dlsap_sel, * disconnected yet (waiting for timeout in LAP). * Maybe we could give LAP a bit of help in this case. 
*/ - IRDA_DEBUG(0, "%s(), sorry, but I'm waiting for LAP to timeout!\n", __func__); + pr_debug("%s(), sorry, but I'm waiting for LAP to timeout!\n", + __func__); ret = -EAGAIN; goto err; } /* LAP is already connected to a different node, and LAP * can only talk to one node at a time */ - IRDA_DEBUG(0, "%s(), sorry, but link is busy!\n", __func__); + pr_debug("%s(), sorry, but link is busy!\n", __func__); ret = -EBUSY; goto err; } @@ -528,8 +519,8 @@ void irlmp_connect_indication(struct lsap_cb *self, struct sk_buff *skb) IRDA_ASSERT(skb != NULL, return;); IRDA_ASSERT(self->lap != NULL, return;); - IRDA_DEBUG(2, "%s(), slsap_sel=%02x, dlsap_sel=%02x\n", - __func__, self->slsap_sel, self->dlsap_sel); + pr_debug("%s(), slsap_sel=%02x, dlsap_sel=%02x\n", + __func__, self->slsap_sel, self->dlsap_sel); /* Note : self->lap is set in irlmp_link_data_indication(), * (case CONNECT_CMD:) because we have no way to set it here. @@ -569,8 +560,8 @@ int irlmp_connect_response(struct lsap_cb *self, struct sk_buff *userdata) /* We set the connected bit and move the lsap to the connected list * in the state machine itself. Jean II */ - IRDA_DEBUG(2, "%s(), slsap_sel=%02x, dlsap_sel=%02x\n", - __func__, self->slsap_sel, self->dlsap_sel); + pr_debug("%s(), slsap_sel=%02x, dlsap_sel=%02x\n", + __func__, self->slsap_sel, self->dlsap_sel); /* Make room for MUX control header (3 bytes) */ IRDA_ASSERT(skb_headroom(userdata) >= LMP_CONTROL_HEADER, return -1;); @@ -596,8 +587,6 @@ void irlmp_connect_confirm(struct lsap_cb *self, struct sk_buff *skb) int lap_header_size; int max_seg_size; - IRDA_DEBUG(3, "%s()\n", __func__); - IRDA_ASSERT(skb != NULL, return;); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); @@ -609,8 +598,8 @@ void irlmp_connect_confirm(struct lsap_cb *self, struct sk_buff *skb) lap_header_size = IRLAP_GET_HEADER_SIZE(self->lap->irlap); max_header_size = LMP_HEADER + lap_header_size; - IRDA_DEBUG(2, "%s(), max_header_size=%d\n", - __func__, max_header_size); + pr_debug("%s(), max_header_size=%d\n", + __func__, max_header_size); /* Hide LMP_CONTROL_HEADER header from layer above */ skb_pull(skb, LMP_CONTROL_HEADER); @@ -636,16 +625,14 @@ struct lsap_cb *irlmp_dup(struct lsap_cb *orig, void *instance) struct lsap_cb *new; unsigned long flags; - IRDA_DEBUG(1, "%s()\n", __func__); - spin_lock_irqsave(&irlmp->unconnected_lsaps->hb_spinlock, flags); /* Only allowed to duplicate unconnected LSAP's, and only LSAPs * that have received a connect indication. Jean II */ if ((!hashbin_find(irlmp->unconnected_lsaps, (long) orig, NULL)) || (orig->lap == NULL)) { - IRDA_DEBUG(0, "%s(), invalid LSAP (wrong state)\n", - __func__); + pr_debug("%s(), invalid LSAP (wrong state)\n", + __func__); spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock, flags); return NULL; @@ -654,7 +641,7 @@ struct lsap_cb *irlmp_dup(struct lsap_cb *orig, void *instance) /* Allocate a new instance */ new = kmemdup(orig, sizeof(*new), GFP_ATOMIC); if (!new) { - IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __func__); + pr_debug("%s(), unable to kmalloc\n", __func__); spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock, flags); return NULL; @@ -700,7 +687,7 @@ int irlmp_disconnect_request(struct lsap_cb *self, struct sk_buff *userdata) * and us that might mess up the hashbins below. This fixes it. * Jean II */ if (! 
test_and_clear_bit(0, &self->connected)) { - IRDA_DEBUG(0, "%s(), already disconnected!\n", __func__); + pr_debug("%s(), already disconnected!\n", __func__); dev_kfree_skb(userdata); return -1; } @@ -754,20 +741,20 @@ void irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason, { struct lsap_cb *lsap; - IRDA_DEBUG(1, "%s(), reason=%s [%d]\n", __func__, - irlmp_reason_str(reason), reason); + pr_debug("%s(), reason=%s [%d]\n", __func__, + irlmp_reason_str(reason), reason); IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); - IRDA_DEBUG(3, "%s(), slsap_sel=%02x, dlsap_sel=%02x\n", - __func__, self->slsap_sel, self->dlsap_sel); + pr_debug("%s(), slsap_sel=%02x, dlsap_sel=%02x\n", + __func__, self->slsap_sel, self->dlsap_sel); /* Already disconnected ? * There is a race condition between irlmp_disconnect_request() * and us that might mess up the hashbins below. This fixes it. * Jean II */ if (! test_and_clear_bit(0, &self->connected)) { - IRDA_DEBUG(0, "%s(), already disconnected!\n", __func__); + pr_debug("%s(), already disconnected!\n", __func__); return; } @@ -800,7 +787,7 @@ void irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason, self->notify.disconnect_indication(self->notify.instance, self, reason, skb); } else { - IRDA_DEBUG(0, "%s(), no handler\n", __func__); + pr_debug("%s(), no handler\n", __func__); } } @@ -852,8 +839,8 @@ void irlmp_do_discovery(int nslots) /* Make sure the value is sane */ if ((nslots != 1) && (nslots != 6) && (nslots != 8) && (nslots != 16)){ - IRDA_WARNING("%s: invalid value for number of slots!\n", - __func__); + net_warn_ratelimited("%s: invalid value for number of slots!\n", + __func__); nslots = sysctl_discovery_slots = 8; } @@ -971,8 +958,6 @@ irlmp_notify_client(irlmp_client_t *client, int number; /* Number of nodes in the log */ int i; - IRDA_DEBUG(3, "%s()\n", __func__); - /* Check if client wants or not partial/selective log (optimisation) */ if (!client->disco_callback) return; @@ -1022,8 +1007,6 @@ void irlmp_discovery_confirm(hashbin_t *log, DISCOVERY_MODE mode) irlmp_client_t *client; irlmp_client_t *client_next; - IRDA_DEBUG(3, "%s()\n", __func__); - IRDA_ASSERT(log != NULL, return;); if (!(HASHBIN_GET_SIZE(log))) @@ -1057,8 +1040,6 @@ void irlmp_discovery_expiry(discinfo_t *expiries, int number) irlmp_client_t *client_next; int i; - IRDA_DEBUG(3, "%s()\n", __func__); - IRDA_ASSERT(expiries != NULL, return;); /* For each client - notify callback may touch client list */ @@ -1091,8 +1072,6 @@ void irlmp_discovery_expiry(discinfo_t *expiries, int number) */ discovery_t *irlmp_get_discovery_response(void) { - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(irlmp != NULL, return NULL;); put_unaligned(irlmp->hints.word, (__u16 *)irlmp->discovery_rsp.data.hints); @@ -1169,8 +1148,6 @@ int irlmp_udata_request(struct lsap_cb *self, struct sk_buff *userdata) { int ret; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(userdata != NULL, return -1;); /* Make room for MUX header */ @@ -1193,8 +1170,6 @@ int irlmp_udata_request(struct lsap_cb *self, struct sk_buff *userdata) */ void irlmp_udata_indication(struct lsap_cb *self, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); @@ -1220,8 +1195,6 @@ int irlmp_connless_data_request(struct lsap_cb *self, struct sk_buff *userdata, struct sk_buff *clone_skb; struct lap_cb *lap; - IRDA_DEBUG(4, "%s()\n", __func__); - 
IRDA_ASSERT(userdata != NULL, return -1;); /* Make room for MUX and PID header */ @@ -1271,8 +1244,6 @@ int irlmp_connless_data_request(struct lsap_cb *self, struct sk_buff *userdata, #ifdef CONFIG_IRDA_ULTRA void irlmp_connless_data_indication(struct lsap_cb *self, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); @@ -1314,7 +1285,7 @@ void irlmp_status_indication(struct lap_cb *self, curr->notify.status_indication(curr->notify.instance, link, lock); else - IRDA_DEBUG(2, "%s(), no handler\n", __func__); + pr_debug("%s(), no handler\n", __func__); curr = next; } @@ -1342,7 +1313,7 @@ void irlmp_flow_indication(struct lap_cb *self, LOCAL_FLOW flow) /* Get the number of lsap. That's the only safe way to know * that we have looped around... - Jean II */ lsap_todo = HASHBIN_GET_SIZE(self->lsaps); - IRDA_DEBUG(4, "%s() : %d lsaps to scan\n", __func__, lsap_todo); + pr_debug("%s() : %d lsaps to scan\n", __func__, lsap_todo); /* Poll lsap in order until the queue is full or until we * tried them all. @@ -1361,14 +1332,16 @@ void irlmp_flow_indication(struct lap_cb *self, LOCAL_FLOW flow) /* Uh-oh... Paranoia */ if(curr == NULL) break; - IRDA_DEBUG(4, "%s() : curr is %p, next was %p and is now %p, still %d to go - queue len = %d\n", __func__, curr, next, self->flow_next, lsap_todo, IRLAP_GET_TX_QUEUE_LEN(self->irlap)); + pr_debug("%s() : curr is %p, next was %p and is now %p, still %d to go - queue len = %d\n", + __func__, curr, next, self->flow_next, lsap_todo, + IRLAP_GET_TX_QUEUE_LEN(self->irlap)); /* Inform lsap user that it can send one more packet. */ if (curr->notify.flow_indication != NULL) curr->notify.flow_indication(curr->notify.instance, curr, flow); else - IRDA_DEBUG(1, "%s(), no handler\n", __func__); + pr_debug("%s(), no handler\n", __func__); } } @@ -1389,32 +1362,30 @@ __u8 *irlmp_hint_to_service(__u8 *hint) * since we currently only support 2 hint bytes */ service = kmalloc(16, GFP_ATOMIC); - if (!service) { - IRDA_DEBUG(1, "%s(), Unable to kmalloc!\n", __func__); + if (!service) return NULL; - } if (!hint[0]) { - IRDA_DEBUG(1, "<None>\n"); + pr_debug("<None>\n"); kfree(service); return NULL; } if (hint[0] & HINT_PNP) - IRDA_DEBUG(1, "PnP Compatible "); + pr_debug("PnP Compatible "); if (hint[0] & HINT_PDA) - IRDA_DEBUG(1, "PDA/Palmtop "); + pr_debug("PDA/Palmtop "); if (hint[0] & HINT_COMPUTER) - IRDA_DEBUG(1, "Computer "); + pr_debug("Computer "); if (hint[0] & HINT_PRINTER) { - IRDA_DEBUG(1, "Printer "); + pr_debug("Printer "); service[i++] = S_PRINTER; } if (hint[0] & HINT_MODEM) - IRDA_DEBUG(1, "Modem "); + pr_debug("Modem "); if (hint[0] & HINT_FAX) - IRDA_DEBUG(1, "Fax "); + pr_debug("Fax "); if (hint[0] & HINT_LAN) { - IRDA_DEBUG(1, "LAN Access "); + pr_debug("LAN Access "); service[i++] = S_LAN; } /* @@ -1424,22 +1395,22 @@ __u8 *irlmp_hint_to_service(__u8 *hint) */ if (hint[0] & HINT_EXTENSION) { if (hint[1] & HINT_TELEPHONY) { - IRDA_DEBUG(1, "Telephony "); + pr_debug("Telephony "); service[i++] = S_TELEPHONY; } if (hint[1] & HINT_FILE_SERVER) - IRDA_DEBUG(1, "File Server "); + pr_debug("File Server "); if (hint[1] & HINT_COMM) { - IRDA_DEBUG(1, "IrCOMM "); + pr_debug("IrCOMM "); service[i++] = S_COMM; } if (hint[1] & HINT_OBEX) { - IRDA_DEBUG(1, "IrOBEX "); + pr_debug("IrOBEX "); service[i++] = S_OBEX; } } - IRDA_DEBUG(1, "\n"); + pr_debug("\n"); /* So that client can be notified about any discovery */ service[i++] = S_ANY; @@ 
-1492,14 +1463,13 @@ void *irlmp_register_service(__u16 hints) { irlmp_service_t *service; - IRDA_DEBUG(4, "%s(), hints = %04x\n", __func__, hints); + pr_debug("%s(), hints = %04x\n", __func__, hints); /* Make a new registration */ service = kmalloc(sizeof(irlmp_service_t), GFP_ATOMIC); - if (!service) { - IRDA_DEBUG(1, "%s(), Unable to kmalloc!\n", __func__); + if (!service) return NULL; - } + service->hints.word = hints; hashbin_insert(irlmp->services, (irda_queue_t *) service, (long) service, NULL); @@ -1522,15 +1492,13 @@ int irlmp_unregister_service(void *handle) irlmp_service_t *service; unsigned long flags; - IRDA_DEBUG(4, "%s()\n", __func__); - if (!handle) return -1; /* Caller may call with invalid handle (it's legal) - Jean II */ service = hashbin_lock_find(irlmp->services, (long) handle, NULL); if (!service) { - IRDA_DEBUG(1, "%s(), Unknown service!\n", __func__); + pr_debug("%s(), Unknown service!\n", __func__); return -1; } @@ -1567,15 +1535,12 @@ void *irlmp_register_client(__u16 hint_mask, DISCOVERY_CALLBACK1 disco_clb, { irlmp_client_t *client; - IRDA_DEBUG(1, "%s()\n", __func__); IRDA_ASSERT(irlmp != NULL, return NULL;); /* Make a new registration */ client = kmalloc(sizeof(irlmp_client_t), GFP_ATOMIC); - if (!client) { - IRDA_DEBUG( 1, "%s(), Unable to kmalloc!\n", __func__); + if (!client) return NULL; - } /* Register the details */ client->hint_mask.word = hint_mask; @@ -1609,7 +1574,7 @@ int irlmp_update_client(void *handle, __u16 hint_mask, client = hashbin_lock_find(irlmp->clients, (long) handle, NULL); if (!client) { - IRDA_DEBUG(1, "%s(), Unknown client!\n", __func__); + pr_debug("%s(), Unknown client!\n", __func__); return -1; } @@ -1632,19 +1597,17 @@ int irlmp_unregister_client(void *handle) { struct irlmp_client *client; - IRDA_DEBUG(4, "%s()\n", __func__); - if (!handle) return -1; /* Caller may call with invalid handle (it's legal) - Jean II */ client = hashbin_lock_find(irlmp->clients, (long) handle, NULL); if (!client) { - IRDA_DEBUG(1, "%s(), Unknown client!\n", __func__); + pr_debug("%s(), Unknown client!\n", __func__); return -1; } - IRDA_DEBUG(4, "%s(), removing client!\n", __func__); + pr_debug("%s(), removing client!\n", __func__); hashbin_remove_this(irlmp->clients, (irda_queue_t *) client); kfree(client); @@ -1673,8 +1636,6 @@ static int irlmp_slsap_inuse(__u8 slsap_sel) IRDA_ASSERT(irlmp->magic == LMP_MAGIC, return TRUE;); IRDA_ASSERT(slsap_sel != LSAP_ANY, return TRUE;); - IRDA_DEBUG(4, "%s()\n", __func__); - #ifdef CONFIG_IRDA_ULTRA /* Accept all bindings to the connectionless LSAP */ if (slsap_sel == LSAP_CONNLESS) @@ -1708,8 +1669,8 @@ static int irlmp_slsap_inuse(__u8 slsap_sel) goto errlsap;); if ((self->slsap_sel == slsap_sel)) { - IRDA_DEBUG(4, "Source LSAP selector=%02x in use\n", - self->slsap_sel); + pr_debug("Source LSAP selector=%02x in use\n", + self->slsap_sel); goto errlsap; } self = (struct lsap_cb*) hashbin_get_next(lap->lsaps); @@ -1733,8 +1694,8 @@ static int irlmp_slsap_inuse(__u8 slsap_sel) while (self != NULL) { IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, goto erruncon;); if ((self->slsap_sel == slsap_sel)) { - IRDA_DEBUG(4, "Source LSAP selector=%02x in use (unconnected)\n", - self->slsap_sel); + pr_debug("Source LSAP selector=%02x in use (unconnected)\n", + self->slsap_sel); goto erruncon; } self = (struct lsap_cb*) hashbin_get_next(irlmp->unconnected_lsaps); @@ -1799,8 +1760,8 @@ static __u8 irlmp_find_free_slsap(void) /* Make sure we terminate the loop */ if (wrapped++) { - IRDA_ERROR("%s: no more free LSAPs !\n", - 
__func__); + net_err_ratelimited("%s: no more free LSAPs !\n", + __func__); return 0; } } @@ -1814,8 +1775,8 @@ static __u8 irlmp_find_free_slsap(void) /* Got it ! */ lsap_sel = irlmp->last_lsap_sel; - IRDA_DEBUG(4, "%s(), found free lsap_sel=%02x\n", - __func__, lsap_sel); + pr_debug("%s(), found free lsap_sel=%02x\n", + __func__, lsap_sel); return lsap_sel; } @@ -1833,26 +1794,27 @@ LM_REASON irlmp_convert_lap_reason( LAP_REASON lap_reason) switch (lap_reason) { case LAP_DISC_INDICATION: /* Received a disconnect request from peer */ - IRDA_DEBUG( 1, "%s(), LAP_DISC_INDICATION\n", __func__); + pr_debug("%s(), LAP_DISC_INDICATION\n", __func__); reason = LM_USER_REQUEST; break; case LAP_NO_RESPONSE: /* To many retransmits without response */ - IRDA_DEBUG( 1, "%s(), LAP_NO_RESPONSE\n", __func__); + pr_debug("%s(), LAP_NO_RESPONSE\n", __func__); reason = LM_LAP_DISCONNECT; break; case LAP_RESET_INDICATION: - IRDA_DEBUG( 1, "%s(), LAP_RESET_INDICATION\n", __func__); + pr_debug("%s(), LAP_RESET_INDICATION\n", __func__); reason = LM_LAP_RESET; break; case LAP_FOUND_NONE: case LAP_MEDIA_BUSY: case LAP_PRIMARY_CONFLICT: - IRDA_DEBUG(1, "%s(), LAP_FOUND_NONE, LAP_MEDIA_BUSY or LAP_PRIMARY_CONFLICT\n", __func__); + pr_debug("%s(), LAP_FOUND_NONE, LAP_MEDIA_BUSY or LAP_PRIMARY_CONFLICT\n", + __func__); reason = LM_CONNECT_FAILURE; break; default: - IRDA_DEBUG(1, "%s(), Unknown IrLAP disconnect reason %d!\n", - __func__, lap_reason); + pr_debug("%s(), Unknown IrLAP disconnect reason %d!\n", + __func__, lap_reason); reason = LM_LAP_DISCONNECT; break; } diff --git a/net/irda/irlmp_event.c b/net/irda/irlmp_event.c index 9505a7d06f1a..e306cf2c1e04 100644 --- a/net/irda/irlmp_event.c +++ b/net/irda/irlmp_event.c @@ -48,8 +48,7 @@ const char *const irlsap_state[] = { "LSAP_SETUP_PEND", }; -#ifdef CONFIG_IRDA_DEBUG -static const char *const irlmp_event[] = { +static const char *const irlmp_event[] __maybe_unused = { "LM_CONNECT_REQUEST", "LM_CONNECT_CONFIRM", "LM_CONNECT_RESPONSE", @@ -75,7 +74,6 @@ static const char *const irlmp_event[] = { "LM_LAP_DISCOVERY_CONFIRM", "LM_LAP_IDLE_TIMEOUT", }; -#endif /* CONFIG_IRDA_DEBUG */ /* LAP Connection control proto declarations */ static void irlmp_state_standby (struct lap_cb *, IRLMP_EVENT, @@ -120,7 +118,7 @@ static inline void irlmp_next_lap_state(struct lap_cb *self, IRLMP_STATE state) { /* - IRDA_DEBUG(4, "%s(), LMP LAP = %s\n", __func__, irlmp_state[state]); + pr_debug("%s(), LMP LAP = %s\n", __func__, irlmp_state[state]); */ self->lap_state = state; } @@ -130,7 +128,7 @@ static inline void irlmp_next_lsap_state(struct lsap_cb *self, { /* IRDA_ASSERT(self != NULL, return;); - IRDA_DEBUG(4, "%s(), LMP LSAP = %s\n", __func__, irlsap_state[state]); + pr_debug("%s(), LMP LSAP = %s\n", __func__, irlsap_state[state]); */ self->lsap_state = state; } @@ -142,8 +140,8 @@ int irlmp_do_lsap_event(struct lsap_cb *self, IRLMP_EVENT event, IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); - IRDA_DEBUG(4, "%s(), EVENT = %s, STATE = %s\n", - __func__, irlmp_event[event], irlsap_state[ self->lsap_state]); + pr_debug("%s(), EVENT = %s, STATE = %s\n", + __func__, irlmp_event[event], irlsap_state[self->lsap_state]); return (*lsap_state[self->lsap_state]) (self, event, skb); } @@ -160,17 +158,15 @@ void irlmp_do_lap_event(struct lap_cb *self, IRLMP_EVENT event, IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); - IRDA_DEBUG(4, "%s(), EVENT = %s, STATE = %s\n", __func__, - irlmp_event[event], - 
irlmp_state[self->lap_state]); + pr_debug("%s(), EVENT = %s, STATE = %s\n", __func__, + irlmp_event[event], + irlmp_state[self->lap_state]); (*lap_state[self->lap_state]) (self, event, skb); } void irlmp_discovery_timer_expired(void *data) { - IRDA_DEBUG(4, "%s()\n", __func__); - /* We always cleanup the log (active & passive discovery) */ irlmp_do_expiry(); @@ -184,8 +180,6 @@ void irlmp_watchdog_timer_expired(void *data) { struct lsap_cb *self = (struct lsap_cb *) data; - IRDA_DEBUG(2, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); @@ -196,8 +190,6 @@ void irlmp_idle_timer_expired(void *data) { struct lap_cb *self = (struct lap_cb *) data; - IRDA_DEBUG(2, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); @@ -256,7 +248,6 @@ irlmp_do_all_lsap_event(hashbin_t * lsap_hashbin, static void irlmp_state_standby(struct lap_cb *self, IRLMP_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__); IRDA_ASSERT(self->irlap != NULL, return;); switch (event) { @@ -276,7 +267,7 @@ static void irlmp_state_standby(struct lap_cb *self, IRLMP_EVENT event, irlap_connect_response(self->irlap, skb); break; case LM_LAP_CONNECT_REQUEST: - IRDA_DEBUG(4, "%s() LS_CONNECT_REQUEST\n", __func__); + pr_debug("%s() LS_CONNECT_REQUEST\n", __func__); irlmp_next_lap_state(self, LAP_U_CONNECT); @@ -284,14 +275,14 @@ static void irlmp_state_standby(struct lap_cb *self, IRLMP_EVENT event, irlap_connect_request(self->irlap, self->daddr, NULL, 0); break; case LM_LAP_DISCONNECT_INDICATION: - IRDA_DEBUG(4, "%s(), Error LM_LAP_DISCONNECT_INDICATION\n", - __func__); + pr_debug("%s(), Error LM_LAP_DISCONNECT_INDICATION\n", + __func__); irlmp_next_lap_state(self, LAP_STANDBY); break; default: - IRDA_DEBUG(0, "%s(), Unknown event %s\n", - __func__, irlmp_event[event]); + pr_debug("%s(), Unknown event %s\n", + __func__, irlmp_event[event]); break; } } @@ -306,7 +297,7 @@ static void irlmp_state_standby(struct lap_cb *self, IRLMP_EVENT event, static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(2, "%s(), event=%s\n", __func__, irlmp_event[event]); + pr_debug("%s(), event=%s\n", __func__, irlmp_event[event]); switch (event) { case LM_LAP_CONNECT_INDICATION: @@ -326,7 +317,7 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event, * the lsaps may already have gone. This avoid getting stuck * forever in LAP_ACTIVE state - Jean II */ if (HASHBIN_GET_SIZE(self->lsaps) == 0) { - IRDA_DEBUG(0, "%s() NO LSAPs !\n", __func__); + pr_debug("%s() NO LSAPs !\n", __func__); irlmp_start_idle_timer(self, LM_IDLE_TIMEOUT); } break; @@ -344,12 +335,12 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event, * the lsaps may already have gone. 
This avoid getting stuck * forever in LAP_ACTIVE state - Jean II */ if (HASHBIN_GET_SIZE(self->lsaps) == 0) { - IRDA_DEBUG(0, "%s() NO LSAPs !\n", __func__); + pr_debug("%s() NO LSAPs !\n", __func__); irlmp_start_idle_timer(self, LM_IDLE_TIMEOUT); } break; case LM_LAP_DISCONNECT_INDICATION: - IRDA_DEBUG(4, "%s(), LM_LAP_DISCONNECT_INDICATION\n", __func__); + pr_debug("%s(), LM_LAP_DISCONNECT_INDICATION\n", __func__); irlmp_next_lap_state(self, LAP_STANDBY); /* Send disconnect event to all LSAPs using this link */ @@ -357,7 +348,7 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event, LM_LAP_DISCONNECT_INDICATION); break; case LM_LAP_DISCONNECT_REQUEST: - IRDA_DEBUG(4, "%s(), LM_LAP_DISCONNECT_REQUEST\n", __func__); + pr_debug("%s(), LM_LAP_DISCONNECT_REQUEST\n", __func__); /* One of the LSAP did timeout or was closed, if it was * the last one, try to get out of here - Jean II */ @@ -366,7 +357,7 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event, } break; default: - IRDA_DEBUG(0, "%s(), Unknown event %s\n", + pr_debug("%s(), Unknown event %s\n", __func__, irlmp_event[event]); break; } @@ -381,11 +372,9 @@ static void irlmp_state_u_connect(struct lap_cb *self, IRLMP_EVENT event, static void irlmp_state_active(struct lap_cb *self, IRLMP_EVENT event, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__); - switch (event) { case LM_LAP_CONNECT_REQUEST: - IRDA_DEBUG(4, "%s(), LS_CONNECT_REQUEST\n", __func__); + pr_debug("%s(), LS_CONNECT_REQUEST\n", __func__); /* * IrLAP may have a pending disconnect. We tried to close @@ -467,7 +456,7 @@ static void irlmp_state_active(struct lap_cb *self, IRLMP_EVENT event, irlmp_do_expiry(); break; default: - IRDA_DEBUG(0, "%s(), Unknown event %s\n", + pr_debug("%s(), Unknown event %s\n", __func__, irlmp_event[event]); break; } @@ -490,8 +479,6 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event, { int ret = 0; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); @@ -505,11 +492,11 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event, break; #endif /* CONFIG_IRDA_ULTRA */ case LM_CONNECT_REQUEST: - IRDA_DEBUG(4, "%s(), LM_CONNECT_REQUEST\n", __func__); + pr_debug("%s(), LM_CONNECT_REQUEST\n", __func__); if (self->conn_skb) { - IRDA_WARNING("%s: busy with another request!\n", - __func__); + net_warn_ratelimited("%s: busy with another request!\n", + __func__); return -EBUSY; } /* Don't forget to refcount it (see irlmp_connect_request()) */ @@ -525,8 +512,8 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event, break; case LM_CONNECT_INDICATION: if (self->conn_skb) { - IRDA_WARNING("%s: busy with another request!\n", - __func__); + net_warn_ratelimited("%s: busy with another request!\n", + __func__); return -EBUSY; } /* Don't forget to refcount it (see irlap_driver_rcv()) */ @@ -551,8 +538,8 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event, irlmp_do_lap_event(self->lap, LM_LAP_CONNECT_REQUEST, NULL); break; default: - IRDA_DEBUG(1, "%s(), Unknown event %s on LSAP %#02x\n", - __func__, irlmp_event[event], self->slsap_sel); + pr_debug("%s(), Unknown event %s on LSAP %#02x\n", + __func__, irlmp_event[event], self->slsap_sel); break; } return ret; @@ -570,8 +557,6 @@ static int irlmp_state_connect(struct lsap_cb *self, IRLMP_EVENT event, struct lsap_cb *lsap; int ret = 0; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != 
NULL, return -1;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); @@ -603,7 +588,7 @@ static int irlmp_state_connect(struct lsap_cb *self, IRLMP_EVENT event, case LM_WATCHDOG_TIMEOUT: /* May happen, who knows... * Jean II */ - IRDA_DEBUG(0, "%s() WATCHDOG_TIMEOUT!\n", __func__); + pr_debug("%s() WATCHDOG_TIMEOUT!\n", __func__); /* Disconnect, get out... - Jean II */ self->lap = NULL; @@ -613,8 +598,8 @@ static int irlmp_state_connect(struct lsap_cb *self, IRLMP_EVENT event, default: /* LM_LAP_DISCONNECT_INDICATION : Should never happen, we * are *not* yet bound to the IrLAP link. Jean II */ - IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n", - __func__, irlmp_event[event], self->slsap_sel); + pr_debug("%s(), Unknown event %s on LSAP %#02x\n", + __func__, irlmp_event[event], self->slsap_sel); break; } return ret; @@ -632,8 +617,6 @@ static int irlmp_state_connect_pend(struct lsap_cb *self, IRLMP_EVENT event, struct sk_buff *tx_skb; int ret = 0; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); @@ -642,17 +625,17 @@ static int irlmp_state_connect_pend(struct lsap_cb *self, IRLMP_EVENT event, /* Keep state */ break; case LM_CONNECT_RESPONSE: - IRDA_DEBUG(0, "%s(), LM_CONNECT_RESPONSE, " - "no indication issued yet\n", __func__); + pr_debug("%s(), LM_CONNECT_RESPONSE, no indication issued yet\n", + __func__); /* Keep state */ break; case LM_DISCONNECT_REQUEST: - IRDA_DEBUG(0, "%s(), LM_DISCONNECT_REQUEST, " - "not yet bound to IrLAP connection\n", __func__); + pr_debug("%s(), LM_DISCONNECT_REQUEST, not yet bound to IrLAP connection\n", + __func__); /* Keep state */ break; case LM_LAP_CONNECT_CONFIRM: - IRDA_DEBUG(4, "%s(), LS_CONNECT_CONFIRM\n", __func__); + pr_debug("%s(), LS_CONNECT_CONFIRM\n", __func__); irlmp_next_lsap_state(self, LSAP_CONNECT); tx_skb = self->conn_skb; @@ -666,7 +649,7 @@ static int irlmp_state_connect_pend(struct lsap_cb *self, IRLMP_EVENT event, /* Will happen in some rare cases because of a race condition. * Just make sure we don't stay there forever... * Jean II */ - IRDA_DEBUG(0, "%s() WATCHDOG_TIMEOUT!\n", __func__); + pr_debug("%s() WATCHDOG_TIMEOUT!\n", __func__); /* Go back to disconnected mode, keep the socket waiting */ self->lap = NULL; @@ -679,8 +662,8 @@ static int irlmp_state_connect_pend(struct lsap_cb *self, IRLMP_EVENT event, default: /* LM_LAP_DISCONNECT_INDICATION : Should never happen, we * are *not* yet bound to the IrLAP link. 
Jean II */ - IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n", - __func__, irlmp_event[event], self->slsap_sel); + pr_debug("%s(), Unknown event %s on LSAP %#02x\n", + __func__, irlmp_event[event], self->slsap_sel); break; } return ret; @@ -698,8 +681,6 @@ static int irlmp_state_dtr(struct lsap_cb *self, IRLMP_EVENT event, LM_REASON reason; int ret = 0; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); IRDA_ASSERT(self->lap != NULL, return -1;); @@ -721,13 +702,13 @@ static int irlmp_state_dtr(struct lsap_cb *self, IRLMP_EVENT event, irlmp_udata_indication(self, skb); break; case LM_CONNECT_REQUEST: - IRDA_DEBUG(0, "%s(), LM_CONNECT_REQUEST, " - "error, LSAP already connected\n", __func__); + pr_debug("%s(), LM_CONNECT_REQUEST, error, LSAP already connected\n", + __func__); /* Keep state */ break; case LM_CONNECT_RESPONSE: - IRDA_DEBUG(0, "%s(), LM_CONNECT_RESPONSE, " - "error, LSAP already connected\n", __func__); + pr_debug("%s(), LM_CONNECT_RESPONSE, error, LSAP already connected\n", + __func__); /* Keep state */ break; case LM_DISCONNECT_REQUEST: @@ -739,8 +720,8 @@ static int irlmp_state_dtr(struct lsap_cb *self, IRLMP_EVENT event, /* Try to close the LAP connection if its still there */ if (self->lap) { - IRDA_DEBUG(4, "%s(), trying to close IrLAP\n", - __func__); + pr_debug("%s(), trying to close IrLAP\n", + __func__); irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL); @@ -764,14 +745,14 @@ static int irlmp_state_dtr(struct lsap_cb *self, IRLMP_EVENT event, reason = skb->data[3]; /* Try to close the LAP connection */ - IRDA_DEBUG(4, "%s(), trying to close IrLAP\n", __func__); + pr_debug("%s(), trying to close IrLAP\n", __func__); irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL); irlmp_disconnect_indication(self, reason, skb); break; default: - IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n", - __func__, irlmp_event[event], self->slsap_sel); + pr_debug("%s(), Unknown event %s on LSAP %#02x\n", + __func__, irlmp_event[event], self->slsap_sel); break; } return ret; @@ -793,8 +774,6 @@ static int irlmp_state_setup(struct lsap_cb *self, IRLMP_EVENT event, IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return -1;); - IRDA_DEBUG(4, "%s()\n", __func__); - switch (event) { case LM_CONNECT_CONFIRM: irlmp_next_lsap_state(self, LSAP_DATA_TRANSFER_READY); @@ -814,7 +793,7 @@ static int irlmp_state_setup(struct lsap_cb *self, IRLMP_EVENT event, reason = skb->data[3]; /* Try to close the LAP connection */ - IRDA_DEBUG(4, "%s(), trying to close IrLAP\n", __func__); + pr_debug("%s(), trying to close IrLAP\n", __func__); irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL); irlmp_disconnect_indication(self, reason, skb); @@ -832,7 +811,7 @@ static int irlmp_state_setup(struct lsap_cb *self, IRLMP_EVENT event, irlmp_disconnect_indication(self, reason, skb); break; case LM_WATCHDOG_TIMEOUT: - IRDA_DEBUG(0, "%s() WATCHDOG_TIMEOUT!\n", __func__); + pr_debug("%s() WATCHDOG_TIMEOUT!\n", __func__); IRDA_ASSERT(self->lap != NULL, return -1;); irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL); @@ -841,8 +820,8 @@ static int irlmp_state_setup(struct lsap_cb *self, IRLMP_EVENT event, irlmp_disconnect_indication(self, LM_CONNECT_FAILURE, NULL); break; default: - IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n", - __func__, irlmp_event[event], self->slsap_sel); + pr_debug("%s(), Unknown event %s on LSAP %#02x\n", + __func__, 
irlmp_event[event], self->slsap_sel); break; } return ret; @@ -863,8 +842,6 @@ static int irlmp_state_setup_pend(struct lsap_cb *self, IRLMP_EVENT event, LM_REASON reason; int ret = 0; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(irlmp != NULL, return -1;); @@ -883,7 +860,7 @@ static int irlmp_state_setup_pend(struct lsap_cb *self, IRLMP_EVENT event, irlmp_next_lsap_state(self, LSAP_SETUP); break; case LM_WATCHDOG_TIMEOUT: - IRDA_DEBUG(0, "%s() : WATCHDOG_TIMEOUT !\n", __func__); + pr_debug("%s() : WATCHDOG_TIMEOUT !\n", __func__); IRDA_ASSERT(self->lap != NULL, return -1;); irlmp_do_lap_event(self->lap, LM_LAP_DISCONNECT_REQUEST, NULL); @@ -901,8 +878,8 @@ static int irlmp_state_setup_pend(struct lsap_cb *self, IRLMP_EVENT event, irlmp_disconnect_indication(self, reason, NULL); break; default: - IRDA_DEBUG(0, "%s(), Unknown event %s on LSAP %#02x\n", - __func__, irlmp_event[event], self->slsap_sel); + pr_debug("%s(), Unknown event %s on LSAP %#02x\n", + __func__, irlmp_event[event], self->slsap_sel); break; } return ret; diff --git a/net/irda/irlmp_frame.c b/net/irda/irlmp_frame.c index 062e63b1c5c4..38b0f994bc7b 100644 --- a/net/irda/irlmp_frame.c +++ b/net/irda/irlmp_frame.c @@ -44,7 +44,7 @@ inline void irlmp_send_data_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap, skb->data[1] = slsap; if (expedited) { - IRDA_DEBUG(4, "%s(), sending expedited data\n", __func__); + pr_debug("%s(), sending expedited data\n", __func__); irlap_data_request(self->irlap, skb, TRUE); } else irlap_data_request(self->irlap, skb, FALSE); @@ -60,8 +60,6 @@ void irlmp_send_lcf_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap, { __u8 *frame; - IRDA_DEBUG(2, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); @@ -95,8 +93,6 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb, __u8 dlsap_sel; /* Destination LSAP address */ __u8 *fp; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); IRDA_ASSERT(skb->len > 2, return;); @@ -115,9 +111,8 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb, * it in a different way than other established connections. 
*/ if ((fp[0] & CONTROL_BIT) && (fp[2] == CONNECT_CMD)) { - IRDA_DEBUG(3, "%s(), incoming connection, " - "source LSAP=%d, dest LSAP=%d\n", - __func__, slsap_sel, dlsap_sel); + pr_debug("%s(), incoming connection, source LSAP=%d, dest LSAP=%d\n", + __func__, slsap_sel, dlsap_sel); /* Try to find LSAP among the unconnected LSAPs */ lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, CONNECT_CMD, @@ -125,7 +120,8 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb, /* Maybe LSAP was already connected, so try one more time */ if (!lsap) { - IRDA_DEBUG(1, "%s(), incoming connection for LSAP already connected\n", __func__); + pr_debug("%s(), incoming connection for LSAP already connected\n", + __func__); lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, 0, self->lsaps); } @@ -134,14 +130,14 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb, self->lsaps); if (lsap == NULL) { - IRDA_DEBUG(2, "IrLMP, Sorry, no LSAP for received frame!\n"); - IRDA_DEBUG(2, "%s(), slsap_sel = %02x, dlsap_sel = %02x\n", - __func__, slsap_sel, dlsap_sel); + pr_debug("IrLMP, Sorry, no LSAP for received frame!\n"); + pr_debug("%s(), slsap_sel = %02x, dlsap_sel = %02x\n", + __func__, slsap_sel, dlsap_sel); if (fp[0] & CONTROL_BIT) { - IRDA_DEBUG(2, "%s(), received control frame %02x\n", - __func__, fp[2]); + pr_debug("%s(), received control frame %02x\n", + __func__, fp[2]); } else { - IRDA_DEBUG(2, "%s(), received data frame\n", __func__); + pr_debug("%s(), received data frame\n", __func__); } return; } @@ -159,20 +155,20 @@ void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb, irlmp_do_lsap_event(lsap, LM_CONNECT_CONFIRM, skb); break; case DISCONNECT: - IRDA_DEBUG(4, "%s(), Disconnect indication!\n", - __func__); + pr_debug("%s(), Disconnect indication!\n", + __func__); irlmp_do_lsap_event(lsap, LM_DISCONNECT_INDICATION, skb); break; case ACCESSMODE_CMD: - IRDA_DEBUG(0, "Access mode cmd not implemented!\n"); + pr_debug("Access mode cmd not implemented!\n"); break; case ACCESSMODE_CNF: - IRDA_DEBUG(0, "Access mode cnf not implemented!\n"); + pr_debug("Access mode cnf not implemented!\n"); break; default: - IRDA_DEBUG(0, "%s(), Unknown control frame %02x\n", - __func__, fp[2]); + pr_debug("%s(), Unknown control frame %02x\n", + __func__, fp[2]); break; } } else if (unreliable) { @@ -206,8 +202,6 @@ void irlmp_link_unitdata_indication(struct lap_cb *self, struct sk_buff *skb) __u8 *fp; unsigned long flags; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); IRDA_ASSERT(skb->len > 2, return;); @@ -223,14 +217,14 @@ void irlmp_link_unitdata_indication(struct lap_cb *self, struct sk_buff *skb) pid = fp[2]; if (pid & 0x80) { - IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", - __func__); + pr_debug("%s(), extension in PID not supp!\n", + __func__); return; } /* Check if frame is addressed to the connectionless LSAP */ if ((slsap_sel != LSAP_CONNLESS) || (dlsap_sel != LSAP_CONNLESS)) { - IRDA_DEBUG(0, "%s(), dropping frame!\n", __func__); + pr_debug("%s(), dropping frame!\n", __func__); return; } @@ -254,7 +248,7 @@ void irlmp_link_unitdata_indication(struct lap_cb *self, struct sk_buff *skb) if (lsap) irlmp_connless_data_indication(lsap, skb); else { - IRDA_DEBUG(0, "%s(), found no matching LSAP!\n", __func__); + pr_debug("%s(), found no matching LSAP!\n", __func__); } } #endif /* CONFIG_IRDA_ULTRA */ @@ -270,8 +264,6 @@ void irlmp_link_disconnect_indication(struct lap_cb *lap, 
LAP_REASON reason, struct sk_buff *skb) { - IRDA_DEBUG(2, "%s()\n", __func__); - IRDA_ASSERT(lap != NULL, return;); IRDA_ASSERT(lap->magic == LMP_LAP_MAGIC, return;); @@ -296,8 +288,6 @@ void irlmp_link_connect_indication(struct lap_cb *self, __u32 saddr, __u32 daddr, struct qos_info *qos, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__); - /* Copy QoS settings for this session */ self->qos = qos; @@ -317,8 +307,6 @@ void irlmp_link_connect_indication(struct lap_cb *self, __u32 saddr, void irlmp_link_connect_confirm(struct lap_cb *self, struct qos_info *qos, struct sk_buff *skb) { - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); IRDA_ASSERT(qos != NULL, return;); @@ -383,8 +371,6 @@ void irlmp_link_discovery_indication(struct lap_cb *self, */ void irlmp_link_discovery_confirm(struct lap_cb *self, hashbin_t *log) { - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;); diff --git a/net/irda/irmod.c b/net/irda/irmod.c index 303a68d92731..c5e35b85c477 100644 --- a/net/irda/irmod.c +++ b/net/irda/irmod.c @@ -42,16 +42,6 @@ #include <net/irda/irttp.h> /* irttp_init */ #include <net/irda/irda_device.h> /* irda_device_init */ -/* - * Module parameters - */ -#ifdef CONFIG_IRDA_DEBUG -unsigned int irda_debug = IRDA_DEBUG_LEVEL; -module_param_named(debug, irda_debug, uint, 0); -MODULE_PARM_DESC(debug, "IRDA debugging level"); -EXPORT_SYMBOL(irda_debug); -#endif - /* Packet type handler. * Tell the kernel how IrDA packets should be handled. */ @@ -90,8 +80,6 @@ static int __init irda_init(void) { int ret = 0; - IRDA_DEBUG(0, "%s()\n", __func__); - /* Lower layer of the stack */ irlmp_init(); irlap_init(); diff --git a/net/irda/irnetlink.c b/net/irda/irnetlink.c index a37b81fe0479..e15c40e86660 100644 --- a/net/irda/irnetlink.c +++ b/net/irda/irnetlink.c @@ -41,7 +41,7 @@ static struct net_device * ifname_to_netdev(struct net *net, struct genl_info *i ifname = nla_data(info->attrs[IRDA_NL_ATTR_IFNAME]); - IRDA_DEBUG(5, "%s(): Looking for %s\n", __func__, ifname); + pr_debug("%s(): Looking for %s\n", __func__, ifname); return dev_get_by_name(net, ifname); } @@ -57,7 +57,7 @@ static int irda_nl_set_mode(struct sk_buff *skb, struct genl_info *info) mode = nla_get_u32(info->attrs[IRDA_NL_ATTR_MODE]); - IRDA_DEBUG(5, "%s(): Switching to mode: %d\n", __func__, mode); + pr_debug("%s(): Switching to mode: %d\n", __func__, mode); dev = ifname_to_netdev(&init_net, info); if (!dev) diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c index 7152624ed5f1..acbe61c7e683 100644 --- a/net/irda/irqueue.c +++ b/net/irda/irqueue.c @@ -233,8 +233,6 @@ static __u32 hash( const char* name) static void enqueue_first(irda_queue_t **queue, irda_queue_t* element) { - IRDA_DEBUG( 4, "%s()\n", __func__); - /* * Check if queue is empty. 
*/ @@ -267,7 +265,7 @@ static irda_queue_t *dequeue_first(irda_queue_t **queue) { irda_queue_t *ret; - IRDA_DEBUG( 4, "dequeue_first()\n"); + pr_debug("dequeue_first()\n"); /* * Set return value @@ -308,7 +306,7 @@ static irda_queue_t *dequeue_general(irda_queue_t **queue, irda_queue_t* element { irda_queue_t *ret; - IRDA_DEBUG( 4, "dequeue_general()\n"); + pr_debug("dequeue_general()\n"); /* * Set return value @@ -452,8 +450,6 @@ void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, long hashv, unsigned long flags = 0; int bin; - IRDA_DEBUG( 4, "%s()\n", __func__); - IRDA_ASSERT( hashbin != NULL, return;); IRDA_ASSERT( hashbin->magic == HB_MAGIC, return;); @@ -565,8 +561,6 @@ void* hashbin_remove( hashbin_t* hashbin, long hashv, const char* name) unsigned long flags = 0; irda_queue_t* entry; - IRDA_DEBUG( 4, "%s()\n", __func__); - IRDA_ASSERT( hashbin != NULL, return NULL;); IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;); @@ -658,8 +652,6 @@ void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry) int bin; long hashv; - IRDA_DEBUG( 4, "%s()\n", __func__); - IRDA_ASSERT( hashbin != NULL, return NULL;); IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;); IRDA_ASSERT( entry != NULL, return NULL;); @@ -719,7 +711,7 @@ void* hashbin_find( hashbin_t* hashbin, long hashv, const char* name ) int bin; irda_queue_t* entry; - IRDA_DEBUG( 4, "hashbin_find()\n"); + pr_debug("hashbin_find()\n"); IRDA_ASSERT( hashbin != NULL, return NULL;); IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;); diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c index d6a59651767a..873da5e7d428 100644 --- a/net/irda/irsysctl.c +++ b/net/irda/irsysctl.c @@ -126,15 +126,6 @@ static struct ctl_table irda_table[] = { .mode = 0644, .proc_handler = do_devname, }, -#ifdef CONFIG_IRDA_DEBUG - { - .procname = "debug", - .data = &irda_debug, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec - }, -#endif #ifdef CONFIG_IRDA_FAST_RR { .procname = "fast_poll_increase", diff --git a/net/irda/irttp.c b/net/irda/irttp.c index 85372cfa7b9f..3ef0b08b6bf5 100644 --- a/net/irda/irttp.c +++ b/net/irda/irttp.c @@ -96,8 +96,8 @@ int __init irttp_init(void) irttp->tsaps = hashbin_new(HB_LOCK); if (!irttp->tsaps) { - IRDA_ERROR("%s: can't allocate IrTTP hashbin!\n", - __func__); + net_err_ratelimited("%s: can't allocate IrTTP hashbin!\n", + __func__); kfree(irttp); return -ENOMEM; } @@ -166,7 +166,7 @@ static void irttp_todo_expired(unsigned long data) if (!self || self->magic != TTP_TSAP_MAGIC) return; - IRDA_DEBUG(4, "%s(instance=%p)\n", __func__, self); + pr_debug("%s(instance=%p)\n", __func__, self); /* Try to make some progress, especially on Tx side - Jean II */ irttp_run_rx_queue(self); @@ -207,8 +207,6 @@ static void irttp_flush_queues(struct tsap_cb *self) { struct sk_buff *skb; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;); @@ -240,8 +238,8 @@ static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self) IRDA_ASSERT(self != NULL, return NULL;); IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return NULL;); - IRDA_DEBUG(2, "%s(), self->rx_sdu_size=%d\n", __func__, - self->rx_sdu_size); + pr_debug("%s(), self->rx_sdu_size=%d\n", __func__, + self->rx_sdu_size); skb = dev_alloc_skb(TTP_HEADER + self->rx_sdu_size); if (!skb) @@ -264,9 +262,8 @@ static struct sk_buff *irttp_reassemble_skb(struct tsap_cb *self) dev_kfree_skb(frag); } - IRDA_DEBUG(2, - "%s(), frame len=%d, rx_sdu_size=%d, 
rx_max_sdu_size=%d\n", - __func__, n, self->rx_sdu_size, self->rx_max_sdu_size); + pr_debug("%s(), frame len=%d, rx_sdu_size=%d, rx_max_sdu_size=%d\n", + __func__, n, self->rx_sdu_size, self->rx_max_sdu_size); /* Note : irttp_run_rx_queue() calculate self->rx_sdu_size * by summing the size of all fragments, so we should always * have n == self->rx_sdu_size, except in cases where we @@ -295,8 +292,6 @@ static inline void irttp_fragment_skb(struct tsap_cb *self, struct sk_buff *frag; __u8 *frame; - IRDA_DEBUG(2, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;); IRDA_ASSERT(skb != NULL, return;); @@ -305,7 +300,7 @@ static inline void irttp_fragment_skb(struct tsap_cb *self, * Split frame into a number of segments */ while (skb->len > self->max_seg_size) { - IRDA_DEBUG(2, "%s(), fragmenting ...\n", __func__); + pr_debug("%s(), fragmenting ...\n", __func__); /* Make new segment */ frag = alloc_skb(self->max_seg_size+self->max_header_size, @@ -330,7 +325,7 @@ static inline void irttp_fragment_skb(struct tsap_cb *self, skb_queue_tail(&self->tx_queue, frag); } /* Queue what is left of the original skb */ - IRDA_DEBUG(2, "%s(), queuing last segment\n", __func__); + pr_debug("%s(), queuing last segment\n", __func__); frame = skb_push(skb, TTP_HEADER); frame[0] = 0x00; /* Clear more bit */ @@ -361,7 +356,7 @@ static int irttp_param_max_sdu_size(void *instance, irda_param_t *param, else self->tx_max_sdu_size = param->pv.i; - IRDA_DEBUG(1, "%s(), MaxSduSize=%d\n", __func__, param->pv.i); + pr_debug("%s(), MaxSduSize=%d\n", __func__, param->pv.i); return 0; } @@ -402,15 +397,13 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify) * JeanII */ if ((stsap_sel != LSAP_ANY) && ((stsap_sel < 0x01) || (stsap_sel >= 0x70))) { - IRDA_DEBUG(0, "%s(), invalid tsap!\n", __func__); + pr_debug("%s(), invalid tsap!\n", __func__); return NULL; } self = kzalloc(sizeof(struct tsap_cb), GFP_ATOMIC); - if (self == NULL) { - IRDA_DEBUG(0, "%s(), unable to kmalloc!\n", __func__); + if (self == NULL) return NULL; - } /* Initialize internal objects */ irttp_init_tsap(self); @@ -440,7 +433,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify) */ lsap = irlmp_open_lsap(stsap_sel, &ttp_notify, 0); if (lsap == NULL) { - IRDA_DEBUG(0, "%s: unable to allocate LSAP!!\n", __func__); + pr_debug("%s: unable to allocate LSAP!!\n", __func__); __irttp_close_tsap(self); return NULL; } @@ -451,7 +444,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify) * the stsap_sel we have might not be valid anymore */ self->stsap_sel = lsap->slsap_sel; - IRDA_DEBUG(4, "%s(), stsap_sel=%02x\n", __func__, self->stsap_sel); + pr_debug("%s(), stsap_sel=%02x\n", __func__, self->stsap_sel); self->notify = *notify; self->lsap = lsap; @@ -509,8 +502,6 @@ int irttp_close_tsap(struct tsap_cb *self) { struct tsap_cb *tsap; - IRDA_DEBUG(4, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;); @@ -518,8 +509,8 @@ int irttp_close_tsap(struct tsap_cb *self) if (self->connected) { /* Check if disconnect is not pending */ if (!test_bit(0, &self->disconnect_pend)) { - IRDA_WARNING("%s: TSAP still connected!\n", - __func__); + net_warn_ratelimited("%s: TSAP still connected!\n", + __func__); irttp_disconnect_request(self, NULL, P_NORMAL); } self->close_pend = TRUE; @@ -558,8 +549,6 @@ int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb) 
IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;); IRDA_ASSERT(skb != NULL, return -1;); - IRDA_DEBUG(4, "%s()\n", __func__); - /* Take shortcut on zero byte packets */ if (skb->len == 0) { ret = 0; @@ -568,13 +557,14 @@ int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb) /* Check that nothing bad happens */ if (!self->connected) { - IRDA_WARNING("%s(), Not connected\n", __func__); + net_warn_ratelimited("%s(), Not connected\n", __func__); ret = -ENOTCONN; goto err; } if (skb->len > self->max_seg_size) { - IRDA_ERROR("%s(), UData is too large for IrLAP!\n", __func__); + net_err_ratelimited("%s(), UData is too large for IrLAP!\n", + __func__); ret = -EMSGSIZE; goto err; } @@ -606,8 +596,8 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb) IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;); IRDA_ASSERT(skb != NULL, return -1;); - IRDA_DEBUG(2, "%s() : queue len = %d\n", __func__, - skb_queue_len(&self->tx_queue)); + pr_debug("%s() : queue len = %d\n", __func__, + skb_queue_len(&self->tx_queue)); /* Take shortcut on zero byte packets */ if (skb->len == 0) { @@ -617,7 +607,7 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb) /* Check that nothing bad happens */ if (!self->connected) { - IRDA_WARNING("%s: Not connected\n", __func__); + net_warn_ratelimited("%s: Not connected\n", __func__); ret = -ENOTCONN; goto err; } @@ -627,8 +617,8 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb) * inside an IrLAP frame */ if ((self->tx_max_sdu_size == 0) && (skb->len > self->max_seg_size)) { - IRDA_ERROR("%s: SAR disabled, and data is too large for IrLAP!\n", - __func__); + net_err_ratelimited("%s: SAR disabled, and data is too large for IrLAP!\n", + __func__); ret = -EMSGSIZE; goto err; } @@ -640,8 +630,8 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb) if ((self->tx_max_sdu_size != 0) && (self->tx_max_sdu_size != TTP_SAR_UNBOUND) && (skb->len > self->tx_max_sdu_size)) { - IRDA_ERROR("%s: SAR enabled, but data is larger than TxMaxSduSize!\n", - __func__); + net_err_ratelimited("%s: SAR enabled, but data is larger than TxMaxSduSize!\n", + __func__); ret = -EMSGSIZE; goto err; } @@ -719,9 +709,9 @@ static void irttp_run_tx_queue(struct tsap_cb *self) unsigned long flags; int n; - IRDA_DEBUG(2, "%s() : send_credit = %d, queue_len = %d\n", - __func__, - self->send_credit, skb_queue_len(&self->tx_queue)); + pr_debug("%s() : send_credit = %d, queue_len = %d\n", + __func__, + self->send_credit, skb_queue_len(&self->tx_queue)); /* Get exclusive access to the tx queue, otherwise don't touch it */ if (irda_lock(&self->tx_queue_lock) == FALSE) @@ -826,9 +816,9 @@ static inline void irttp_give_credit(struct tsap_cb *self) IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;); - IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n", - __func__, - self->send_credit, self->avail_credit, self->remote_credit); + pr_debug("%s() send=%d,avail=%d,remote=%d\n", + __func__, + self->send_credit, self->avail_credit, self->remote_credit); /* Give credit to peer */ tx_skb = alloc_skb(TTP_MAX_HEADER, GFP_ATOMIC); @@ -876,8 +866,6 @@ static int irttp_udata_indication(void *instance, void *sap, struct tsap_cb *self; int err; - IRDA_DEBUG(4, "%s()\n", __func__); - self = instance; IRDA_ASSERT(self != NULL, return -1;); @@ -993,8 +981,6 @@ static void irttp_status_indication(void *instance, { struct tsap_cb *self; - IRDA_DEBUG(4, "%s()\n", __func__); - self = instance; IRDA_ASSERT(self != NULL, return;); @@ 
-1011,7 +997,7 @@ static void irttp_status_indication(void *instance, self->notify.status_indication(self->notify.instance, link, lock); else - IRDA_DEBUG(2, "%s(), no handler\n", __func__); + pr_debug("%s(), no handler\n", __func__); } /* @@ -1029,7 +1015,7 @@ static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow) IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;); - IRDA_DEBUG(4, "%s(instance=%p)\n", __func__, self); + pr_debug("%s(instance=%p)\n", __func__, self); /* We are "polled" directly from LAP, and the LAP want to fill * its Tx window. We want to do our best to send it data, so that @@ -1067,18 +1053,16 @@ static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow) */ void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow) { - IRDA_DEBUG(1, "%s()\n", __func__); - IRDA_ASSERT(self != NULL, return;); IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;); switch (flow) { case FLOW_STOP: - IRDA_DEBUG(1, "%s(), flow stop\n", __func__); + pr_debug("%s(), flow stop\n", __func__); self->rx_sdu_busy = TRUE; break; case FLOW_START: - IRDA_DEBUG(1, "%s(), flow start\n", __func__); + pr_debug("%s(), flow start\n", __func__); self->rx_sdu_busy = FALSE; /* Client say he can accept more data, try to free our @@ -1087,7 +1071,7 @@ void irttp_flow_request(struct tsap_cb *self, LOCAL_FLOW flow) break; default: - IRDA_DEBUG(1, "%s(), Unknown flow command!\n", __func__); + pr_debug("%s(), Unknown flow command!\n", __func__); } } EXPORT_SYMBOL(irttp_flow_request); @@ -1107,7 +1091,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel, __u8 *frame; __u8 n; - IRDA_DEBUG(4, "%s(), max_sdu_size=%d\n", __func__, max_sdu_size); + pr_debug("%s(), max_sdu_size=%d\n", __func__, max_sdu_size); IRDA_ASSERT(self != NULL, return -EBADR;); IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -EBADR;); @@ -1205,8 +1189,6 @@ static void irttp_connect_confirm(void *instance, void *sap, __u8 plen; __u8 n; - IRDA_DEBUG(4, "%s()\n", __func__); - self = instance; IRDA_ASSERT(self != NULL, return;); @@ -1221,15 +1203,15 @@ static void irttp_connect_confirm(void *instance, void *sap, * negotiated QoS for the link. */ if (qos) { - IRDA_DEBUG(4, "IrTTP, Negotiated BAUD_RATE: %02x\n", - qos->baud_rate.bits); - IRDA_DEBUG(4, "IrTTP, Negotiated BAUD_RATE: %d bps.\n", - qos->baud_rate.value); + pr_debug("IrTTP, Negotiated BAUD_RATE: %02x\n", + qos->baud_rate.bits); + pr_debug("IrTTP, Negotiated BAUD_RATE: %d bps.\n", + qos->baud_rate.value); } n = skb->data[0] & 0x7f; - IRDA_DEBUG(4, "%s(), Initial send_credit=%d\n", __func__, n); + pr_debug("%s(), Initial send_credit=%d\n", __func__, n); self->send_credit = n; self->tx_max_sdu_size = 0; @@ -1249,8 +1231,8 @@ static void irttp_connect_confirm(void *instance, void *sap, /* Any errors in the parameter list? 
*/ if (ret < 0) { - IRDA_WARNING("%s: error extracting parameters\n", - __func__); + net_warn_ratelimited("%s: error extracting parameters\n", + __func__); dev_kfree_skb(skb); /* Do not accept this connection attempt */ @@ -1260,11 +1242,11 @@ static void irttp_connect_confirm(void *instance, void *sap, skb_pull(skb, IRDA_MIN(skb->len, plen+1)); } - IRDA_DEBUG(4, "%s() send=%d,avail=%d,remote=%d\n", __func__, - self->send_credit, self->avail_credit, self->remote_credit); + pr_debug("%s() send=%d,avail=%d,remote=%d\n", __func__, + self->send_credit, self->avail_credit, self->remote_credit); - IRDA_DEBUG(2, "%s(), MaxSduSize=%d\n", __func__, - self->tx_max_sdu_size); + pr_debug("%s(), MaxSduSize=%d\n", __func__, + self->tx_max_sdu_size); if (self->notify.connect_confirm) { self->notify.connect_confirm(self->notify.instance, self, qos, @@ -1302,7 +1284,7 @@ static void irttp_connect_indication(void *instance, void *sap, self->max_seg_size = max_seg_size - TTP_HEADER; self->max_header_size = max_header_size+TTP_HEADER; - IRDA_DEBUG(4, "%s(), TSAP sel=%02x\n", __func__, self->stsap_sel); + pr_debug("%s(), TSAP sel=%02x\n", __func__, self->stsap_sel); /* Need to update dtsap_sel if its equal to LSAP_ANY */ self->dtsap_sel = lsap->dlsap_sel; @@ -1326,8 +1308,8 @@ static void irttp_connect_indication(void *instance, void *sap, /* Any errors in the parameter list? */ if (ret < 0) { - IRDA_WARNING("%s: error extracting parameters\n", - __func__); + net_warn_ratelimited("%s: error extracting parameters\n", + __func__); dev_kfree_skb(skb); /* Do not accept this connection attempt */ @@ -1364,8 +1346,8 @@ int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size, IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;); - IRDA_DEBUG(4, "%s(), Source TSAP selector=%02x\n", __func__, - self->stsap_sel); + pr_debug("%s(), Source TSAP selector=%02x\n", __func__, + self->stsap_sel); /* Any userdata supplied? */ if (userdata == NULL) { @@ -1446,14 +1428,12 @@ struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance) struct tsap_cb *new; unsigned long flags; - IRDA_DEBUG(1, "%s()\n", __func__); - /* Protect our access to the old tsap instance */ spin_lock_irqsave(&irttp->tsaps->hb_spinlock, flags); /* Find the old instance */ if (!hashbin_find(irttp->tsaps, (long) orig, NULL)) { - IRDA_DEBUG(0, "%s(), unable to find TSAP\n", __func__); + pr_debug("%s(), unable to find TSAP\n", __func__); spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags); return NULL; } @@ -1461,7 +1441,7 @@ struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance) /* Allocate a new instance */ new = kmemdup(orig, sizeof(struct tsap_cb), GFP_ATOMIC); if (!new) { - IRDA_DEBUG(0, "%s(), unable to kmalloc\n", __func__); + pr_debug("%s(), unable to kmalloc\n", __func__); spin_unlock_irqrestore(&irttp->tsaps->hb_spinlock, flags); return NULL; } @@ -1473,7 +1453,7 @@ struct tsap_cb *irttp_dup(struct tsap_cb *orig, void *instance) /* Try to dup the LSAP (may fail if we were too slow) */ new->lsap = irlmp_dup(orig->lsap, new); if (!new->lsap) { - IRDA_DEBUG(0, "%s(), dup failed!\n", __func__); + pr_debug("%s(), dup failed!\n", __func__); kfree(new); return NULL; } @@ -1508,7 +1488,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata, /* Already disconnected? 
*/ if (!self->connected) { - IRDA_DEBUG(4, "%s(), already disconnected!\n", __func__); + pr_debug("%s(), already disconnected!\n", __func__); if (userdata) dev_kfree_skb(userdata); return -1; @@ -1520,8 +1500,8 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata, * for following a disconnect_indication() (i.e. net_bh). * Jean II */ if (test_and_set_bit(0, &self->disconnect_pend)) { - IRDA_DEBUG(0, "%s(), disconnect already pending\n", - __func__); + pr_debug("%s(), disconnect already pending\n", + __func__); if (userdata) dev_kfree_skb(userdata); @@ -1540,7 +1520,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata, * disconnecting right now since the data will * not have any usable connection to be sent on */ - IRDA_DEBUG(1, "%s(): High priority!!()\n", __func__); + pr_debug("%s(): High priority!!()\n", __func__); irttp_flush_queues(self); } else if (priority == P_NORMAL) { /* @@ -1561,7 +1541,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata, * be sent at the LMP level (so even if the peer has its Tx queue * full of data). - Jean II */ - IRDA_DEBUG(1, "%s(), Disconnecting ...\n", __func__); + pr_debug("%s(), Disconnecting ...\n", __func__); self->connected = FALSE; if (!userdata) { @@ -1597,8 +1577,6 @@ static void irttp_disconnect_indication(void *instance, void *sap, { struct tsap_cb *self; - IRDA_DEBUG(4, "%s()\n", __func__); - self = instance; IRDA_ASSERT(self != NULL, return;); @@ -1657,7 +1635,7 @@ static void irttp_do_data_indication(struct tsap_cb *self, struct sk_buff *skb) * give an error back */ if (err) { - IRDA_DEBUG(0, "%s() requeueing skb!\n", __func__); + pr_debug("%s() requeueing skb!\n", __func__); /* Make sure we take a break */ self->rx_sdu_busy = TRUE; @@ -1682,8 +1660,8 @@ static void irttp_run_rx_queue(struct tsap_cb *self) struct sk_buff *skb; int more = 0; - IRDA_DEBUG(2, "%s() send=%d,avail=%d,remote=%d\n", __func__, - self->send_credit, self->avail_credit, self->remote_credit); + pr_debug("%s() send=%d,avail=%d,remote=%d\n", __func__, + self->send_credit, self->avail_credit, self->remote_credit); /* Get exclusive access to the rx queue, otherwise don't touch it */ if (irda_lock(&self->rx_queue_lock) == FALSE) @@ -1722,8 +1700,8 @@ static void irttp_run_rx_queue(struct tsap_cb *self) * limits of the maximum size of the rx_sdu */ if (self->rx_sdu_size <= self->rx_max_sdu_size) { - IRDA_DEBUG(4, "%s(), queueing frag\n", - __func__); + pr_debug("%s(), queueing frag\n", + __func__); skb_queue_tail(&self->rx_fragments, skb); } else { /* Free the part of the SDU that is too big */ @@ -1752,7 +1730,7 @@ static void irttp_run_rx_queue(struct tsap_cb *self) /* Now we can deliver the reassembled skb */ irttp_do_data_indication(self, skb); } else { - IRDA_DEBUG(1, "%s(), Truncated frame\n", __func__); + pr_debug("%s(), Truncated frame\n", __func__); /* Free the part of the SDU that is too big */ dev_kfree_skb(skb); diff --git a/net/irda/parameters.c b/net/irda/parameters.c index 6d0869716bf6..006786bfdd65 100644 --- a/net/irda/parameters.c +++ b/net/irda/parameters.c @@ -146,24 +146,24 @@ static int irda_insert_integer(void *self, __u8 *buf, int len, __u8 pi, */ if (p.pl == 0) { if (p.pv.i < 0xff) { - IRDA_DEBUG(2, "%s(), using 1 byte\n", __func__); + pr_debug("%s(), using 1 byte\n", __func__); p.pl = 1; } else if (p.pv.i < 0xffff) { - IRDA_DEBUG(2, "%s(), using 2 bytes\n", __func__); + pr_debug("%s(), using 2 bytes\n", __func__); p.pl = 2; } else { - IRDA_DEBUG(2, "%s(), using 4 
bytes\n", __func__); + pr_debug("%s(), using 4 bytes\n", __func__); p.pl = 4; /* Default length */ } } /* Check if buffer is long enough for insertion */ if (len < (2+p.pl)) { - IRDA_WARNING("%s: buffer too short for insertion!\n", - __func__); + net_warn_ratelimited("%s: buffer too short for insertion!\n", + __func__); return -1; } - IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d, pi=%d\n", __func__, - p.pi, p.pl, p.pv.i); + pr_debug("%s(), pi=%#x, pl=%d, pi=%d\n", __func__, + p.pi, p.pl, p.pv.i); switch (p.pl) { case 1: n += irda_param_pack(buf, "bbb", p.pi, p.pl, (__u8) p.pv.i); @@ -184,8 +184,8 @@ static int irda_insert_integer(void *self, __u8 *buf, int len, __u8 pi, break; default: - IRDA_WARNING("%s: length %d not supported\n", - __func__, p.pl); + net_warn_ratelimited("%s: length %d not supported\n", + __func__, p.pl); /* Skip parameter */ return -1; } @@ -214,9 +214,8 @@ static int irda_extract_integer(void *self, __u8 *buf, int len, __u8 pi, /* Check if buffer is long enough for parsing */ if (len < (2+p.pl)) { - IRDA_WARNING("%s: buffer too short for parsing! " - "Need %d bytes, but len is only %d\n", - __func__, p.pl, len); + net_warn_ratelimited("%s: buffer too short for parsing! Need %d bytes, but len is only %d\n", + __func__, p.pl, len); return -1; } @@ -226,9 +225,8 @@ static int irda_extract_integer(void *self, __u8 *buf, int len, __u8 pi, * PV_INTEGER means that the handler is flexible. */ if (((type & PV_MASK) != PV_INTEGER) && ((type & PV_MASK) != p.pl)) { - IRDA_ERROR("%s: invalid parameter length! " - "Expected %d bytes, but value had %d bytes!\n", - __func__, type & PV_MASK, p.pl); + net_err_ratelimited("%s: invalid parameter length! Expected %d bytes, but value had %d bytes!\n", + __func__, type & PV_MASK, p.pl); /* Most parameters are bit/byte fields or little endian, * so it's ok to only extract a subset of it (the subset @@ -265,15 +263,15 @@ static int irda_extract_integer(void *self, __u8 *buf, int len, __u8 pi, le32_to_cpus(&p.pv.i); break; default: - IRDA_WARNING("%s: length %d not supported\n", - __func__, p.pl); + net_warn_ratelimited("%s: length %d not supported\n", + __func__, p.pl); /* Skip parameter */ return p.pl+2; } - IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d, pi=%d\n", __func__, - p.pi, p.pl, p.pv.i); + pr_debug("%s(), pi=%#x, pl=%d, pi=%d\n", __func__, + p.pi, p.pl, p.pv.i); /* Call handler for this parameter */ err = (*func)(self, &p, PV_PUT); if (err < 0) @@ -292,21 +290,18 @@ static int irda_extract_string(void *self, __u8 *buf, int len, __u8 pi, irda_param_t p; int err; - IRDA_DEBUG(2, "%s()\n", __func__); - p.pi = pi; /* In case handler needs to know */ p.pl = buf[1]; /* Extract length of value */ if (p.pl > 32) p.pl = 32; - IRDA_DEBUG(2, "%s(), pi=%#x, pl=%d\n", __func__, - p.pi, p.pl); + pr_debug("%s(), pi=%#x, pl=%d\n", __func__, + p.pi, p.pl); /* Check if buffer is long enough for parsing */ if (len < (2+p.pl)) { - IRDA_WARNING("%s: buffer too short for parsing! " - "Need %d bytes, but len is only %d\n", - __func__, p.pl, len); + net_warn_ratelimited("%s: buffer too short for parsing! 
Need %d bytes, but len is only %d\n", + __func__, p.pl, len); return -1; } @@ -314,8 +309,8 @@ static int irda_extract_string(void *self, __u8 *buf, int len, __u8 pi, * checked that the buffer is long enough */ strncpy(str, buf+2, p.pl); - IRDA_DEBUG(2, "%s(), str=0x%02x 0x%02x\n", __func__, - (__u8) str[0], (__u8) str[1]); + pr_debug("%s(), str=0x%02x 0x%02x\n", + __func__, (__u8)str[0], (__u8)str[1]); /* Null terminate string */ str[p.pl] = '\0'; @@ -343,13 +338,12 @@ static int irda_extract_octseq(void *self, __u8 *buf, int len, __u8 pi, /* Check if buffer is long enough for parsing */ if (len < (2+p.pl)) { - IRDA_WARNING("%s: buffer too short for parsing! " - "Need %d bytes, but len is only %d\n", - __func__, p.pl, len); + net_warn_ratelimited("%s: buffer too short for parsing! Need %d bytes, but len is only %d\n", + __func__, p.pl, len); return -1; } - IRDA_DEBUG(0, "%s(), not impl\n", __func__); + pr_debug("%s(), not impl\n", __func__); return p.pl+2; /* Extracted pl+2 bytes */ } @@ -472,8 +466,8 @@ int irda_param_insert(void *self, __u8 pi, __u8 *buf, int len, if ((pi_major > info->len-1) || (pi_minor > info->tables[pi_major].len-1)) { - IRDA_DEBUG(0, "%s(), no handler for parameter=0x%02x\n", - __func__, pi); + pr_debug("%s(), no handler for parameter=0x%02x\n", + __func__, pi); /* Skip this parameter */ return -1; @@ -487,7 +481,8 @@ int irda_param_insert(void *self, __u8 pi, __u8 *buf, int len, /* Check if handler has been implemented */ if (!pi_minor_info->func) { - IRDA_MESSAGE("%s: no handler for pi=%#x\n", __func__, pi); + net_info_ratelimited("%s: no handler for pi=%#x\n", + __func__, pi); /* Skip this parameter */ return -1; } @@ -526,8 +521,8 @@ static int irda_param_extract(void *self, __u8 *buf, int len, if ((pi_major > info->len-1) || (pi_minor > info->tables[pi_major].len-1)) { - IRDA_DEBUG(0, "%s(), no handler for parameter=0x%02x\n", - __func__, buf[0]); + pr_debug("%s(), no handler for parameter=0x%02x\n", + __func__, buf[0]); /* Skip this parameter */ return 2 + buf[n + 1]; /* Continue */ @@ -539,13 +534,13 @@ static int irda_param_extract(void *self, __u8 *buf, int len, /* Find expected data type for this parameter identifier (pi)*/ type = pi_minor_info->type; - IRDA_DEBUG(3, "%s(), pi=[%d,%d], type=%d\n", __func__, - pi_major, pi_minor, type); + pr_debug("%s(), pi=[%d,%d], type=%d\n", __func__, + pi_major, pi_minor, type); /* Check if handler has been implemented */ if (!pi_minor_info->func) { - IRDA_MESSAGE("%s: no handler for pi=%#x\n", - __func__, buf[n]); + net_info_ratelimited("%s: no handler for pi=%#x\n", + __func__, buf[n]); /* Skip this parameter */ return 2 + buf[n + 1]; /* Continue */ } diff --git a/net/irda/qos.c b/net/irda/qos.c index 11a7cc0cbc28..5ed6c9a7baee 100644 --- a/net/irda/qos.c +++ b/net/irda/qos.c @@ -200,8 +200,8 @@ static int msb_index (__u16 word) * able to check precisely what's going on. If a end user sees this, * it's very likely the peer. - Jean II */ if (word == 0) { - IRDA_WARNING("%s(), Detected buggy peer, adjust null PV to 0x1!\n", - __func__); + net_warn_ratelimited("%s(), Detected buggy peer, adjust null PV to 0x1!\n", + __func__); /* The only safe choice (we don't know the array size) */ word = 0x1; } @@ -342,8 +342,6 @@ static void irlap_adjust_qos_settings(struct qos_info *qos) __u32 line_capacity; int index; - IRDA_DEBUG(2, "%s()\n", __func__); - /* * Make sure the mintt is sensible. * Main culprit : Ericsson T39. 
- Jean II @@ -351,8 +349,8 @@ static void irlap_adjust_qos_settings(struct qos_info *qos) if (sysctl_min_tx_turn_time > qos->min_turn_time.value) { int i; - IRDA_WARNING("%s(), Detected buggy peer, adjust mtt to %dus!\n", - __func__, sysctl_min_tx_turn_time); + net_warn_ratelimited("%s(), Detected buggy peer, adjust mtt to %dus!\n", + __func__, sysctl_min_tx_turn_time); /* We don't really need bits, but easier this way */ i = value_highest_bit(sysctl_min_tx_turn_time, min_turn_times, @@ -368,9 +366,8 @@ static void irlap_adjust_qos_settings(struct qos_info *qos) if ((qos->baud_rate.value < 115200) && (qos->max_turn_time.value < 500)) { - IRDA_DEBUG(0, - "%s(), adjusting max turn time from %d to 500 ms\n", - __func__, qos->max_turn_time.value); + pr_debug("%s(), adjusting max turn time from %d to 500 ms\n", + __func__, qos->max_turn_time.value); qos->max_turn_time.value = 500; } @@ -385,8 +382,8 @@ static void irlap_adjust_qos_settings(struct qos_info *qos) #ifdef CONFIG_IRDA_DYNAMIC_WINDOW while ((qos->data_size.value > line_capacity) && (index > 0)) { qos->data_size.value = data_sizes[index--]; - IRDA_DEBUG(2, "%s(), reducing data size to %d\n", - __func__, qos->data_size.value); + pr_debug("%s(), reducing data size to %d\n", + __func__, qos->data_size.value); } #else /* Use method described in section 6.6.11 of IrLAP */ while (irlap_requested_line_capacity(qos) > line_capacity) { @@ -395,15 +392,15 @@ static void irlap_adjust_qos_settings(struct qos_info *qos) /* Must be able to send at least one frame */ if (qos->window_size.value > 1) { qos->window_size.value--; - IRDA_DEBUG(2, "%s(), reducing window size to %d\n", - __func__, qos->window_size.value); + pr_debug("%s(), reducing window size to %d\n", + __func__, qos->window_size.value); } else if (index > 1) { qos->data_size.value = data_sizes[index--]; - IRDA_DEBUG(2, "%s(), reducing data size to %d\n", - __func__, qos->data_size.value); + pr_debug("%s(), reducing data size to %d\n", + __func__, qos->data_size.value); } else { - IRDA_WARNING("%s(), nothing more we can do!\n", - __func__); + net_warn_ratelimited("%s(), nothing more we can do!\n", + __func__); } } #endif /* CONFIG_IRDA_DYNAMIC_WINDOW */ @@ -440,20 +437,20 @@ int irlap_qos_negotiate(struct irlap_cb *self, struct sk_buff *skb) irlap_adjust_qos_settings(&self->qos_tx); - IRDA_DEBUG(2, "Setting BAUD_RATE to %d bps.\n", - self->qos_tx.baud_rate.value); - IRDA_DEBUG(2, "Setting DATA_SIZE to %d bytes\n", - self->qos_tx.data_size.value); - IRDA_DEBUG(2, "Setting WINDOW_SIZE to %d\n", - self->qos_tx.window_size.value); - IRDA_DEBUG(2, "Setting XBOFS to %d\n", - self->qos_tx.additional_bofs.value); - IRDA_DEBUG(2, "Setting MAX_TURN_TIME to %d ms.\n", - self->qos_tx.max_turn_time.value); - IRDA_DEBUG(2, "Setting MIN_TURN_TIME to %d usecs.\n", - self->qos_tx.min_turn_time.value); - IRDA_DEBUG(2, "Setting LINK_DISC to %d secs.\n", - self->qos_tx.link_disc_time.value); + pr_debug("Setting BAUD_RATE to %d bps.\n", + self->qos_tx.baud_rate.value); + pr_debug("Setting DATA_SIZE to %d bytes\n", + self->qos_tx.data_size.value); + pr_debug("Setting WINDOW_SIZE to %d\n", + self->qos_tx.window_size.value); + pr_debug("Setting XBOFS to %d\n", + self->qos_tx.additional_bofs.value); + pr_debug("Setting MAX_TURN_TIME to %d ms.\n", + self->qos_tx.max_turn_time.value); + pr_debug("Setting MIN_TURN_TIME to %d usecs.\n", + self->qos_tx.min_turn_time.value); + pr_debug("Setting LINK_DISC to %d secs.\n", + self->qos_tx.link_disc_time.value); return ret; } @@ -537,17 +534,17 @@ static int 
irlap_param_baud_rate(void *instance, irda_param_t *param, int get) if (get) { param->pv.i = self->qos_rx.baud_rate.bits; - IRDA_DEBUG(2, "%s(), baud rate = 0x%02x\n", - __func__, param->pv.i); + pr_debug("%s(), baud rate = 0x%02x\n", + __func__, param->pv.i); } else { /* * Stations must agree on baud rate, so calculate * intersection */ - IRDA_DEBUG(2, "Requested BAUD_RATE: 0x%04x\n", (__u16) param->pv.i); + pr_debug("Requested BAUD_RATE: 0x%04x\n", (__u16)param->pv.i); final = (__u16) param->pv.i & self->qos_rx.baud_rate.bits; - IRDA_DEBUG(2, "Final BAUD_RATE: 0x%04x\n", final); + pr_debug("Final BAUD_RATE: 0x%04x\n", final); self->qos_tx.baud_rate.bits = final; self->qos_rx.baud_rate.bits = final; } @@ -578,10 +575,10 @@ static int irlap_param_link_disconnect(void *instance, irda_param_t *param, * Stations must agree on link disconnect/threshold * time. */ - IRDA_DEBUG(2, "LINK_DISC: %02x\n", (__u8) param->pv.i); + pr_debug("LINK_DISC: %02x\n", (__u8)param->pv.i); final = (__u8) param->pv.i & self->qos_rx.link_disc_time.bits; - IRDA_DEBUG(2, "Final LINK_DISC: %02x\n", final); + pr_debug("Final LINK_DISC: %02x\n", final); self->qos_tx.link_disc_time.bits = final; self->qos_rx.link_disc_time.bits = final; } @@ -710,8 +707,8 @@ __u32 irlap_max_line_capacity(__u32 speed, __u32 max_turn_time) __u32 line_capacity; int i,j; - IRDA_DEBUG(2, "%s(), speed=%d, max_turn_time=%d\n", - __func__, speed, max_turn_time); + pr_debug("%s(), speed=%d, max_turn_time=%d\n", + __func__, speed, max_turn_time); i = value_index(speed, baud_rates, 10); j = value_index(max_turn_time, max_turn_times, 4); @@ -721,8 +718,8 @@ __u32 irlap_max_line_capacity(__u32 speed, __u32 max_turn_time) line_capacity = max_line_capacities[i][j]; - IRDA_DEBUG(2, "%s(), line capacity=%d bytes\n", - __func__, line_capacity); + pr_debug("%s(), line capacity=%d bytes\n", + __func__, line_capacity); return line_capacity; } @@ -737,8 +734,8 @@ static __u32 irlap_requested_line_capacity(struct qos_info *qos) irlap_min_turn_time_in_bytes(qos->baud_rate.value, qos->min_turn_time.value); - IRDA_DEBUG(2, "%s(), requested line capacity=%d\n", - __func__, line_capacity); + pr_debug("%s(), requested line capacity=%d\n", + __func__, line_capacity); return line_capacity; } diff --git a/net/irda/wrapper.c b/net/irda/wrapper.c index fd0995b1323a..40a0f993bf13 100644 --- a/net/irda/wrapper.c +++ b/net/irda/wrapper.c @@ -106,17 +106,17 @@ int async_wrap_skb(struct sk_buff *skb, __u8 *tx_buff, int buffsize) * Nothing to worry about, but we set the default number of * BOF's */ - IRDA_DEBUG(1, "%s(), wrong magic in skb!\n", __func__); + pr_debug("%s(), wrong magic in skb!\n", __func__); xbofs = 10; } else xbofs = cb->xbofs + cb->xbofs_delay; - IRDA_DEBUG(4, "%s(), xbofs=%d\n", __func__, xbofs); + pr_debug("%s(), xbofs=%d\n", __func__, xbofs); /* Check that we never use more than 115 + 48 xbofs */ if (xbofs > 163) { - IRDA_DEBUG(0, "%s(), too many xbofs (%d)\n", __func__, - xbofs); + pr_debug("%s(), too many xbofs (%d)\n", __func__, + xbofs); xbofs = 163; } @@ -134,8 +134,8 @@ int async_wrap_skb(struct sk_buff *skb, __u8 *tx_buff, int buffsize) * transmitted after this point is 5. 
*/ if(n >= (buffsize-5)) { - IRDA_ERROR("%s(), tx buffer overflow (n=%d)\n", - __func__, n); + net_err_ratelimited("%s(), tx buffer overflow (n=%d)\n", + __func__, n); return n; } @@ -286,8 +286,8 @@ async_unwrap_bof(struct net_device *dev, case INSIDE_FRAME: /* Not supposed to happen, the previous frame is not * finished - Jean II */ - IRDA_DEBUG(1, "%s(), Discarding incomplete frame\n", - __func__); + pr_debug("%s(), Discarding incomplete frame\n", + __func__); stats->rx_errors++; stats->rx_missed_errors++; irda_device_set_media_busy(dev, TRUE); @@ -360,7 +360,7 @@ async_unwrap_eof(struct net_device *dev, /* Wrong CRC, discard frame! */ irda_device_set_media_busy(dev, TRUE); - IRDA_DEBUG(1, "%s(), crc error\n", __func__); + pr_debug("%s(), crc error\n", __func__); stats->rx_errors++; stats->rx_crc_errors++; } @@ -386,7 +386,7 @@ async_unwrap_ce(struct net_device *dev, break; case LINK_ESCAPE: - IRDA_WARNING("%s: state not defined\n", __func__); + net_warn_ratelimited("%s: state not defined\n", __func__); break; case BEGIN_FRAME: @@ -420,8 +420,8 @@ async_unwrap_other(struct net_device *dev, rx_buff->fcs = irda_fcs(rx_buff->fcs, byte); #endif } else { - IRDA_DEBUG(1, "%s(), Rx buffer overflow, aborting\n", - __func__); + pr_debug("%s(), Rx buffer overflow, aborting\n", + __func__); rx_buff->state = OUTSIDE_FRAME; } break; @@ -439,8 +439,8 @@ async_unwrap_other(struct net_device *dev, #endif rx_buff->state = INSIDE_FRAME; } else { - IRDA_DEBUG(1, "%s(), Rx buffer overflow, aborting\n", - __func__); + pr_debug("%s(), Rx buffer overflow, aborting\n", + __func__); rx_buff->state = OUTSIDE_FRAME; } break; diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index a089b6b91650..057b5647ef92 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -1355,7 +1355,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN; cskb = skb; - if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) { + if (skb_copy_datagram_msg(cskb, offset, msg, copied)) { if (!(flags & MSG_PEEK)) skb_queue_head(&sk->sk_receive_queue, skb); return -EFAULT; diff --git a/net/key/af_key.c b/net/key/af_key.c index 1847ec4e3930..e5883091a8c6 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -3654,7 +3654,7 @@ static int pfkey_recvmsg(struct kiocb *kiocb, } skb_reset_transport_header(skb); - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto out_free; diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index edb78e69efe4..781b3a226ba7 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c @@ -126,6 +126,7 @@ static struct net_device_ops l2tp_eth_netdev_ops = { .ndo_uninit = l2tp_eth_dev_uninit, .ndo_start_xmit = l2tp_eth_dev_xmit, .ndo_get_stats64 = l2tp_eth_get_stats64, + .ndo_set_mac_address = eth_mac_addr, }; static void l2tp_eth_dev_setup(struct net_device *dev) diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 369a9822488c..a6cc1fed2b52 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -528,7 +528,7 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m copied = len; } - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto done; diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 0edb263cc002..2177b960da87 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -672,7 +672,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, 
struct sock *sk, copied = len; } - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto done; diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index b704a9356208..c559bcdf4679 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -208,7 +208,7 @@ static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock, else if (len < skb->len) msg->msg_flags |= MSG_TRUNC; - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len); + err = skb_copy_datagram_msg(skb, 0, msg, len); if (likely(err == 0)) err = len; diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c index 3cdaa046c1bc..fc60d9d738b5 100644 --- a/net/lapb/lapb_iface.c +++ b/net/lapb/lapb_iface.c @@ -73,6 +73,7 @@ static void __lapb_remove_cb(struct lapb_cb *lapb) lapb_put(lapb); } } +EXPORT_SYMBOL(lapb_register); /* * Add a socket to the bound sockets list. @@ -195,6 +196,7 @@ out: write_unlock_bh(&lapb_list_lock); return rc; } +EXPORT_SYMBOL(lapb_unregister); int lapb_getparms(struct net_device *dev, struct lapb_parms_struct *parms) { @@ -227,6 +229,7 @@ int lapb_getparms(struct net_device *dev, struct lapb_parms_struct *parms) out: return rc; } +EXPORT_SYMBOL(lapb_getparms); int lapb_setparms(struct net_device *dev, struct lapb_parms_struct *parms) { @@ -262,6 +265,7 @@ out_put: out: return rc; } +EXPORT_SYMBOL(lapb_setparms); int lapb_connect_request(struct net_device *dev) { @@ -290,6 +294,7 @@ out_put: out: return rc; } +EXPORT_SYMBOL(lapb_connect_request); int lapb_disconnect_request(struct net_device *dev) { @@ -334,6 +339,7 @@ out_put: out: return rc; } +EXPORT_SYMBOL(lapb_disconnect_request); int lapb_data_request(struct net_device *dev, struct sk_buff *skb) { @@ -355,6 +361,7 @@ out_put: out: return rc; } +EXPORT_SYMBOL(lapb_data_request); int lapb_data_received(struct net_device *dev, struct sk_buff *skb) { @@ -369,6 +376,7 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb) return rc; } +EXPORT_SYMBOL(lapb_data_received); void lapb_connect_confirmation(struct lapb_cb *lapb, int reason) { @@ -415,15 +423,6 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb) return used; } -EXPORT_SYMBOL(lapb_register); -EXPORT_SYMBOL(lapb_unregister); -EXPORT_SYMBOL(lapb_getparms); -EXPORT_SYMBOL(lapb_setparms); -EXPORT_SYMBOL(lapb_connect_request); -EXPORT_SYMBOL(lapb_disconnect_request); -EXPORT_SYMBOL(lapb_data_request); -EXPORT_SYMBOL(lapb_data_received); - static int __init lapb_init(void) { return 0; diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index bb9cbc17d926..af662669f951 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -819,8 +819,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, used = len; if (!(flags & MSG_TRUNC)) { - int rc = skb_copy_datagram_iovec(skb, offset, - msg->msg_iov, used); + int rc = skb_copy_datagram_msg(skb, offset, msg, used); if (rc) { /* Exception. Bailout! */ if (!copied) diff --git a/net/llc/llc_if.c b/net/llc/llc_if.c index 25c31c0a3fdb..6daf391b3e84 100644 --- a/net/llc/llc_if.c +++ b/net/llc/llc_if.c @@ -15,7 +15,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/netdevice.h> -#include <asm/errno.h> +#include <linux/errno.h> #include <net/llc_if.h> #include <net/llc_sap.h> #include <net/llc_s_ev.h> diff --git a/net/mpls/Makefile b/net/mpls/Makefile index 0a3c171be537..6dec088c2d0f 100644 --- a/net/mpls/Makefile +++ b/net/mpls/Makefile @@ -1,4 +1,4 @@ # # Makefile for MPLS. 
# -obj-y += mpls_gso.o +obj-$(CONFIG_NET_MPLS_GSO) += mpls_gso.o diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c index e28ed2ef5b06..ca27837974fe 100644 --- a/net/mpls/mpls_gso.c +++ b/net/mpls/mpls_gso.c @@ -34,8 +34,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb, SKB_GSO_TCP_ECN | SKB_GSO_GRE | SKB_GSO_GRE_CSUM | - SKB_GSO_IPIP | - SKB_GSO_MPLS))) + SKB_GSO_IPIP))) goto out; /* Setup inner SKB. */ @@ -48,7 +47,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb, __skb_push(skb, skb->mac_len); /* Segment inner packet. */ - mpls_features = skb->dev->mpls_features & netif_skb_features(skb); + mpls_features = skb->dev->mpls_features & features; segs = skb_mac_gso_segment(skb, mpls_features); @@ -59,8 +58,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb, * above pulled. It will be re-pushed after returning * skb_mac_gso_segment(), an indirect caller of this function. */ - __skb_push(skb, skb->data - skb_mac_header(skb)); - + __skb_pull(skb, skb->data - skb_mac_header(skb)); out: return segs; } diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 912e5a05b79d..86f9d76b1464 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -659,7 +659,7 @@ ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index) struct ip_set *set; struct ip_set_net *inst = ip_set_pernet(net); - if (index > inst->ip_set_max) + if (index >= inst->ip_set_max) return IPSET_INVALID_ID; nfnl_lock(NFNL_SUBSYS_IPSET); diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 91f17c1eb8a2..437a3663ad03 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -316,7 +316,7 @@ __ip_vs_get_out_rt(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest, if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode, local))) { IP_VS_DBG_RL("We are crossing local and non-local addresses" - " daddr=%pI4\n", &dest->addr.ip); + " daddr=%pI4\n", &daddr); goto err_put; } @@ -458,7 +458,7 @@ __ip_vs_get_out_rt_v6(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest, if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode, local))) { IP_VS_DBG_RL("We are crossing local and non-local addresses" - " daddr=%pI6\n", &dest->addr.in6); + " daddr=%pI6\n", daddr); goto err_put; } diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 44d1ea32570a..d87b6423ffb2 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c @@ -213,7 +213,7 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = { { /* REPLY */ /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */ -/*syn*/ { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sS2 }, +/*syn*/ { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sSS, sIV, sS2 }, /* * sNO -> sIV Never reached. * sSS -> sS2 Simultaneous open @@ -223,7 +223,7 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = { * sFW -> sIV * sCW -> sIV * sLA -> sIV - * sTW -> sIV Reopened connection, but server may not do it. 
+ * sTW -> sSS Reopened connection, but server may have switched role * sCL -> sIV */ /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */ diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 556a0dfa4abc..11ab4b078f3b 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -1328,10 +1328,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb, basechain->stats = stats; } else { stats = netdev_alloc_pcpu_stats(struct nft_stats); - if (IS_ERR(stats)) { + if (stats == NULL) { module_put(type->owner); kfree(basechain); - return PTR_ERR(stats); + return -ENOMEM; } rcu_assign_pointer(basechain->stats, stats); } @@ -3744,6 +3744,20 @@ static const struct nfnetlink_subsystem nf_tables_subsys = { .abort = nf_tables_abort, }; +int nft_chain_validate_dependency(const struct nft_chain *chain, + enum nft_chain_type type) +{ + const struct nft_base_chain *basechain; + + if (chain->flags & NFT_BASE_CHAIN) { + basechain = nft_base_chain(chain); + if (basechain->type->type != type) + return -EOPNOTSUPP; + } + return 0; +} +EXPORT_SYMBOL_GPL(nft_chain_validate_dependency); + /* * Loop detection - walk through the ruleset beginning at the destination chain * of a new jump until either the source chain is reached (loop) or all diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index b1e3a0579416..5f1be5ba3559 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -43,7 +43,8 @@ #define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE #define NFULNL_TIMEOUT_DEFAULT 100 /* every second */ #define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */ -#define NFULNL_COPY_RANGE_MAX 0xFFFF /* max packet size is limited by 16-bit struct nfattr nfa_len field */ +/* max packet size is limited by 16-bit struct nfattr nfa_len field */ +#define NFULNL_COPY_RANGE_MAX (0xFFFF - NLA_HDRLEN) #define PRINTR(x, args...) 
do { if (net_ratelimit()) \ printk(x, ## args); } while (0); @@ -252,6 +253,8 @@ nfulnl_set_mode(struct nfulnl_instance *inst, u_int8_t mode, case NFULNL_COPY_PACKET: inst->copy_mode = mode; + if (range == 0) + range = NFULNL_COPY_RANGE_MAX; inst->copy_range = min_t(unsigned int, range, NFULNL_COPY_RANGE_MAX); break; @@ -343,26 +346,25 @@ nfulnl_alloc_skb(struct net *net, u32 peer_portid, unsigned int inst_size, return skb; } -static int +static void __nfulnl_send(struct nfulnl_instance *inst) { - int status = -1; - if (inst->qlen > 1) { struct nlmsghdr *nlh = nlmsg_put(inst->skb, 0, 0, NLMSG_DONE, sizeof(struct nfgenmsg), 0); - if (!nlh) + if (WARN_ONCE(!nlh, "bad nlskb size: %u, tailroom %d\n", + inst->skb->len, skb_tailroom(inst->skb))) { + kfree_skb(inst->skb); goto out; + } } - status = nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid, - MSG_DONTWAIT); - + nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid, + MSG_DONTWAIT); +out: inst->qlen = 0; inst->skb = NULL; -out: - return status; } static void @@ -649,7 +651,8 @@ nfulnl_log_packet(struct net *net, + nla_total_size(sizeof(u_int32_t)) /* gid */ + nla_total_size(plen) /* prefix */ + nla_total_size(sizeof(struct nfulnl_msg_packet_hw)) - + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp)); + + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp)) + + nla_total_size(sizeof(struct nfgenmsg)); /* NLMSG_DONE */ if (in && skb_mac_header_was_set(skb)) { size += nla_total_size(skb->dev->hard_header_len) @@ -678,8 +681,7 @@ nfulnl_log_packet(struct net *net, break; case NFULNL_COPY_PACKET: - if (inst->copy_range == 0 - || inst->copy_range > skb->len) + if (inst->copy_range > skb->len) data_len = skb->len; else data_len = inst->copy_range; @@ -692,8 +694,7 @@ nfulnl_log_packet(struct net *net, goto unlock_and_release; } - if (inst->skb && - size > skb_tailroom(inst->skb) - sizeof(struct nfgenmsg)) { + if (inst->skb && size > skb_tailroom(inst->skb)) { /* either the queue len is too high or we don't have * enough room in the skb left. flush to userspace. */ __nfulnl_flush(inst); diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index a82077d9f59b..7c60ccd61a3e 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c @@ -665,7 +665,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum) * returned by nf_queue. For instance, callers rely on -ECANCELED to * mean 'ignore this hook'. 
*/ - if (IS_ERR(segs)) + if (IS_ERR_OR_NULL(segs)) goto out_err; queued = 0; err = 0; diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 7e2683c8a44a..9d6d6f60a80f 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c @@ -19,9 +19,52 @@ #include <linux/netfilter/x_tables.h> #include <linux/netfilter_ipv4/ip_tables.h> #include <linux/netfilter_ipv6/ip6_tables.h> -#include <asm/uaccess.h> /* for set_fs */ #include <net/netfilter/nf_tables.h> +static const struct { + const char *name; + u8 type; +} table_to_chaintype[] = { + { "filter", NFT_CHAIN_T_DEFAULT }, + { "raw", NFT_CHAIN_T_DEFAULT }, + { "security", NFT_CHAIN_T_DEFAULT }, + { "mangle", NFT_CHAIN_T_ROUTE }, + { "nat", NFT_CHAIN_T_NAT }, + { }, +}; + +static int nft_compat_table_to_chaintype(const char *table) +{ + int i; + + for (i = 0; table_to_chaintype[i].name != NULL; i++) { + if (strcmp(table_to_chaintype[i].name, table) == 0) + return table_to_chaintype[i].type; + } + + return -1; +} + +static int nft_compat_chain_validate_dependency(const char *tablename, + const struct nft_chain *chain) +{ + enum nft_chain_type type; + const struct nft_base_chain *basechain; + + if (!tablename || !(chain->flags & NFT_BASE_CHAIN)) + return 0; + + type = nft_compat_table_to_chaintype(tablename); + if (type < 0) + return -EINVAL; + + basechain = nft_base_chain(chain); + if (basechain->type->type != type) + return -EINVAL; + + return 0; +} + union nft_entry { struct ipt_entry e4; struct ip6t_entry e6; @@ -95,6 +138,8 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par, const struct nf_hook_ops *ops = &basechain->ops[0]; par->hook_mask = 1 << ops->hooknum; + } else { + par->hook_mask = 0; } par->family = ctx->afi->family; } @@ -151,6 +196,10 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, union nft_entry e = {}; int ret; + ret = nft_compat_chain_validate_dependency(target->table, ctx->chain); + if (ret < 0) + goto err; + target_compat_from_user(target, nla_data(tb[NFTA_TARGET_INFO]), info); if (ctx->nla[NFTA_RULE_COMPAT]) { @@ -216,6 +265,7 @@ static int nft_target_validate(const struct nft_ctx *ctx, { struct xt_target *target = expr->ops->data; unsigned int hook_mask = 0; + int ret; if (ctx->chain->flags & NFT_BASE_CHAIN) { const struct nft_base_chain *basechain = @@ -223,11 +273,13 @@ static int nft_target_validate(const struct nft_ctx *ctx, const struct nf_hook_ops *ops = &basechain->ops[0]; hook_mask = 1 << ops->hooknum; - if (hook_mask & target->hooks) - return 0; + if (!(hook_mask & target->hooks)) + return -EINVAL; - /* This target is being called from an invalid chain */ - return -EINVAL; + ret = nft_compat_chain_validate_dependency(target->table, + ctx->chain); + if (ret < 0) + return ret; } return 0; } @@ -293,6 +345,8 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx, const struct nf_hook_ops *ops = &basechain->ops[0]; par->hook_mask = 1 << ops->hooknum; + } else { + par->hook_mask = 0; } par->family = ctx->afi->family; } @@ -320,6 +374,10 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, union nft_entry e = {}; int ret; + ret = nft_compat_chain_validate_dependency(match->name, ctx->chain); + if (ret < 0) + goto err; + match_compat_from_user(match, nla_data(tb[NFTA_MATCH_INFO]), info); if (ctx->nla[NFTA_RULE_COMPAT]) { @@ -379,6 +437,7 @@ static int nft_match_validate(const struct nft_ctx *ctx, { struct xt_match *match = expr->ops->data; unsigned int hook_mask = 0; + int ret; if (ctx->chain->flags & 
NFT_BASE_CHAIN) { const struct nft_base_chain *basechain = @@ -386,11 +445,13 @@ static int nft_match_validate(const struct nft_ctx *ctx, const struct nf_hook_ops *ops = &basechain->ops[0]; hook_mask = 1 << ops->hooknum; - if (hook_mask & match->hooks) - return 0; + if (!(hook_mask & match->hooks)) + return -EINVAL; - /* This match is being called from an invalid chain */ - return -EINVAL; + ret = nft_compat_chain_validate_dependency(match->name, + ctx->chain); + if (ret < 0) + return ret; } return 0; } @@ -611,7 +672,7 @@ nft_target_select_ops(const struct nft_ctx *ctx, family = ctx->afi->family; /* Re-use the existing target if it's already loaded. */ - list_for_each_entry(nft_target, &nft_match_list, head) { + list_for_each_entry(nft_target, &nft_target_list, head) { struct xt_target *target = nft_target->ops.data; if (strcmp(target->name, tg_name) == 0 && diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index 8892b7b6184a..1e316ce4cb5d 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c @@ -65,7 +65,7 @@ static int nft_hash_insert(const struct nft_set *set, if (set->flags & NFT_SET_MAP) nft_data_copy(he->data, &elem->data); - rhashtable_insert(priv, &he->node, GFP_KERNEL); + rhashtable_insert(priv, &he->node); return 0; } @@ -88,7 +88,7 @@ static void nft_hash_remove(const struct nft_set *set, pprev = elem->cookie; he = rht_dereference((*pprev), priv); - rhashtable_remove_pprev(priv, he, pprev, GFP_KERNEL); + rhashtable_remove_pprev(priv, he, pprev); synchronize_rcu(); kfree(he); @@ -153,10 +153,12 @@ static unsigned int nft_hash_privsize(const struct nlattr * const nla[]) return sizeof(struct rhashtable); } -static int lockdep_nfnl_lock_is_held(void) +#ifdef CONFIG_PROVE_LOCKING +static int lockdep_nfnl_lock_is_held(void *parent) { return lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES); } +#endif static int nft_hash_init(const struct nft_set *set, const struct nft_set_desc *desc, @@ -171,7 +173,9 @@ static int nft_hash_init(const struct nft_set *set, .hashfn = jhash, .grow_decision = rht_grow_above_75, .shrink_decision = rht_shrink_below_30, +#ifdef CONFIG_PROVE_LOCKING .mutex_is_held = lockdep_nfnl_lock_is_held, +#endif }; return rhashtable_init(priv, ¶ms); diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c index 6637bab00567..d1ffd5eb3a9b 100644 --- a/net/netfilter/nft_masq.c +++ b/net/netfilter/nft_masq.c @@ -26,6 +26,11 @@ int nft_masq_init(const struct nft_ctx *ctx, const struct nlattr * const tb[]) { struct nft_masq *priv = nft_expr_priv(expr); + int err; + + err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); + if (err < 0) + return err; if (tb[NFTA_MASQ_FLAGS] == NULL) return 0; @@ -55,5 +60,12 @@ nla_put_failure: } EXPORT_SYMBOL_GPL(nft_masq_dump); +int nft_masq_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, + const struct nft_data **data) +{ + return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); +} +EXPORT_SYMBOL_GPL(nft_masq_validate); + MODULE_LICENSE("GPL"); MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>"); diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c index 799550b476fb..afe2b0b45ec4 100644 --- a/net/netfilter/nft_nat.c +++ b/net/netfilter/nft_nat.c @@ -95,7 +95,13 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, u32 family; int err; - if (tb[NFTA_NAT_TYPE] == NULL) + err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); + if (err < 0) + return err; + + if (tb[NFTA_NAT_TYPE] == NULL || + 
(tb[NFTA_NAT_REG_ADDR_MIN] == NULL && + tb[NFTA_NAT_REG_PROTO_MIN] == NULL)) return -EINVAL; switch (ntohl(nla_get_be32(tb[NFTA_NAT_TYPE]))) { @@ -120,38 +126,44 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, priv->family = family; if (tb[NFTA_NAT_REG_ADDR_MIN]) { - priv->sreg_addr_min = ntohl(nla_get_be32( - tb[NFTA_NAT_REG_ADDR_MIN])); + priv->sreg_addr_min = + ntohl(nla_get_be32(tb[NFTA_NAT_REG_ADDR_MIN])); + err = nft_validate_input_register(priv->sreg_addr_min); if (err < 0) return err; - } - if (tb[NFTA_NAT_REG_ADDR_MAX]) { - priv->sreg_addr_max = ntohl(nla_get_be32( - tb[NFTA_NAT_REG_ADDR_MAX])); - err = nft_validate_input_register(priv->sreg_addr_max); - if (err < 0) - return err; - } else - priv->sreg_addr_max = priv->sreg_addr_min; + if (tb[NFTA_NAT_REG_ADDR_MAX]) { + priv->sreg_addr_max = + ntohl(nla_get_be32(tb[NFTA_NAT_REG_ADDR_MAX])); + + err = nft_validate_input_register(priv->sreg_addr_max); + if (err < 0) + return err; + } else { + priv->sreg_addr_max = priv->sreg_addr_min; + } + } if (tb[NFTA_NAT_REG_PROTO_MIN]) { - priv->sreg_proto_min = ntohl(nla_get_be32( - tb[NFTA_NAT_REG_PROTO_MIN])); + priv->sreg_proto_min = + ntohl(nla_get_be32(tb[NFTA_NAT_REG_PROTO_MIN])); + err = nft_validate_input_register(priv->sreg_proto_min); if (err < 0) return err; - } - if (tb[NFTA_NAT_REG_PROTO_MAX]) { - priv->sreg_proto_max = ntohl(nla_get_be32( - tb[NFTA_NAT_REG_PROTO_MAX])); - err = nft_validate_input_register(priv->sreg_proto_max); - if (err < 0) - return err; - } else - priv->sreg_proto_max = priv->sreg_proto_min; + if (tb[NFTA_NAT_REG_PROTO_MAX]) { + priv->sreg_proto_max = + ntohl(nla_get_be32(tb[NFTA_NAT_REG_PROTO_MAX])); + + err = nft_validate_input_register(priv->sreg_proto_max); + if (err < 0) + return err; + } else { + priv->sreg_proto_max = priv->sreg_proto_min; + } + } if (tb[NFTA_NAT_FLAGS]) { priv->flags = ntohl(nla_get_be32(tb[NFTA_NAT_FLAGS])); @@ -179,17 +191,19 @@ static int nft_nat_dump(struct sk_buff *skb, const struct nft_expr *expr) if (nla_put_be32(skb, NFTA_NAT_FAMILY, htonl(priv->family))) goto nla_put_failure; - if (nla_put_be32(skb, - NFTA_NAT_REG_ADDR_MIN, htonl(priv->sreg_addr_min))) - goto nla_put_failure; - if (nla_put_be32(skb, - NFTA_NAT_REG_ADDR_MAX, htonl(priv->sreg_addr_max))) - goto nla_put_failure; + + if (priv->sreg_addr_min) { + if (nla_put_be32(skb, NFTA_NAT_REG_ADDR_MIN, + htonl(priv->sreg_addr_min)) || + nla_put_be32(skb, NFTA_NAT_REG_ADDR_MAX, + htonl(priv->sreg_addr_max))) + goto nla_put_failure; + } + if (priv->sreg_proto_min) { if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MIN, - htonl(priv->sreg_proto_min))) - goto nla_put_failure; - if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MAX, + htonl(priv->sreg_proto_min)) || + nla_put_be32(skb, NFTA_NAT_REG_PROTO_MAX, htonl(priv->sreg_proto_max))) goto nla_put_failure; } @@ -205,6 +219,13 @@ nla_put_failure: return -1; } +static int nft_nat_validate(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nft_data **data) +{ + return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT); +} + static struct nft_expr_type nft_nat_type; static const struct nft_expr_ops nft_nat_ops = { .type = &nft_nat_type, @@ -212,6 +233,7 @@ static const struct nft_expr_ops nft_nat_ops = { .eval = nft_nat_eval, .init = nft_nat_init, .dump = nft_nat_dump, + .validate = nft_nat_validate, }; static struct nft_expr_type nft_nat_type __read_mostly = { diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 7a186e74b1b3..e1aad6eeac14 100644 --- 
a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -96,6 +96,14 @@ static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); static int netlink_dump(struct sock *sk); static void netlink_skb_destructor(struct sk_buff *skb); +/* nl_table locking explained: + * Lookup and traversal are protected with nl_sk_hash_lock or nl_table_lock + * combined with an RCU read-side lock. Insertion and removal are protected + * with nl_sk_hash_lock while using RCU list modification primitives and may + * run in parallel to nl_table_lock protected lookups. Destruction of the + * Netlink socket may only occur *after* nl_table_lock has been acquired + * either during or after the socket has been removed from the list. + */ DEFINE_RWLOCK(nl_table_lock); EXPORT_SYMBOL_GPL(nl_table_lock); static atomic_t nl_table_users = ATOMIC_INIT(0); @@ -106,14 +114,14 @@ static atomic_t nl_table_users = ATOMIC_INIT(0); DEFINE_MUTEX(nl_sk_hash_lock); EXPORT_SYMBOL_GPL(nl_sk_hash_lock); -static int lockdep_nl_sk_hash_is_held(void) +#ifdef CONFIG_PROVE_LOCKING +static int lockdep_nl_sk_hash_is_held(void *parent) { -#ifdef CONFIG_LOCKDEP - return (debug_locks) ? lockdep_is_held(&nl_sk_hash_lock) : 1; -#else + if (debug_locks) + return lockdep_is_held(&nl_sk_hash_lock) || lockdep_is_held(&nl_table_lock); return 1; -#endif } +#endif static ATOMIC_NOTIFIER_HEAD(netlink_chain); @@ -134,8 +142,7 @@ int netlink_add_tap(struct netlink_tap *nt) list_add_rcu(&nt->list, &netlink_tap_all); spin_unlock(&netlink_tap_lock); - if (nt->module) - __module_get(nt->module); + __module_get(nt->module); return 0; } @@ -1028,11 +1035,13 @@ static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid) struct netlink_table *table = &nl_table[protocol]; struct sock *sk; + read_lock(&nl_table_lock); rcu_read_lock(); sk = __netlink_lookup(table, portid, net); if (sk) sock_hold(sk); rcu_read_unlock(); + read_unlock(&nl_table_lock); return sk; } @@ -1082,7 +1091,7 @@ static int netlink_insert(struct sock *sk, struct net *net, u32 portid) nlk_sk(sk)->portid = portid; sock_hold(sk); - rhashtable_insert(&table->hash, &nlk_sk(sk)->node, GFP_KERNEL); + rhashtable_insert(&table->hash, &nlk_sk(sk)->node); err = 0; err: mutex_unlock(&nl_sk_hash_lock); @@ -1095,7 +1104,7 @@ static void netlink_remove(struct sock *sk) mutex_lock(&nl_sk_hash_lock); table = &nl_table[sk->sk_protocol]; - if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node, GFP_KERNEL)) { + if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node)) { WARN_ON(atomic_read(&sk->sk_refcnt) == 1); __sock_put(sk); } @@ -1257,9 +1266,6 @@ static int netlink_release(struct socket *sock) } netlink_table_ungrab(); - /* Wait for readers to complete */ - synchronize_net(); - kfree(nlk->groups); nlk->groups = NULL; @@ -1281,6 +1287,7 @@ static int netlink_autobind(struct socket *sock) retry: cond_resched(); + netlink_table_grab(); rcu_read_lock(); if (__netlink_lookup(table, portid, net)) { /* Bind collision, search negative portid values. 
*/ @@ -1288,9 +1295,11 @@ retry: if (rover > -4097) rover = -4097; rcu_read_unlock(); + netlink_table_ungrab(); goto retry; } rcu_read_unlock(); + netlink_table_ungrab(); err = netlink_insert(sk, net, portid); if (err == -EADDRINUSE) @@ -1430,7 +1439,7 @@ static void netlink_unbind(int group, long unsigned int groups, return; for (undo = 0; undo < group; undo++) - if (test_bit(group, &groups)) + if (test_bit(undo, &groups)) nlk->netlink_unbind(undo); } @@ -1482,7 +1491,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, netlink_insert(sk, net, nladdr->nl_pid) : netlink_autobind(sock); if (err) { - netlink_unbind(nlk->ngroups - 1, groups, nlk); + netlink_unbind(nlk->ngroups, groups, nlk); return err; } } @@ -2391,7 +2400,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, } skb_reset_transport_header(data_skb); - err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(data_skb, 0, msg, copied); if (msg->msg_name) { DECLARE_SOCKADDR(struct sockaddr_nl *, addr, msg->msg_name); @@ -2499,6 +2508,7 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module, nl_table[unit].module = module; if (cfg) { nl_table[unit].bind = cfg->bind; + nl_table[unit].unbind = cfg->unbind; nl_table[unit].flags = cfg->flags; if (cfg->compare) nl_table[unit].compare = cfg->compare; @@ -2921,14 +2931,16 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos) } static void *netlink_seq_start(struct seq_file *seq, loff_t *pos) - __acquires(RCU) + __acquires(nl_table_lock) __acquires(RCU) { + read_lock(&nl_table_lock); rcu_read_lock(); return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN; } static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) { + struct rhashtable *ht; struct netlink_sock *nlk; struct nl_seq_iter *iter; struct net *net; @@ -2943,19 +2955,19 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) iter = seq->private; nlk = v; - rht_for_each_entry_rcu(nlk, nlk->node.next, node) + i = iter->link; + ht = &nl_table[i].hash; + rht_for_each_entry(nlk, nlk->node.next, ht, node) if (net_eq(sock_net((struct sock *)nlk), net)) return nlk; - i = iter->link; j = iter->hash_idx + 1; do { - struct rhashtable *ht = &nl_table[i].hash; const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); for (; j < tbl->size; j++) { - rht_for_each_entry_rcu(nlk, tbl->buckets[j], node) { + rht_for_each_entry(nlk, tbl->buckets[j], ht, node) { if (net_eq(sock_net((struct sock *)nlk), net)) { iter->link = i; iter->hash_idx = j; @@ -2971,9 +2983,10 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) } static void netlink_seq_stop(struct seq_file *seq, void *v) - __releases(RCU) + __releases(RCU) __releases(nl_table_lock) { rcu_read_unlock(); + read_unlock(&nl_table_lock); } @@ -3120,7 +3133,9 @@ static int __init netlink_proto_init(void) .max_shift = 16, /* 64K */ .grow_decision = rht_grow_above_75, .shrink_decision = rht_shrink_below_30, +#ifdef CONFIG_PROVE_LOCKING .mutex_is_held = lockdep_nl_sk_hash_is_held, +#endif }; if (err != 0) diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 1b06a1fcf3e8..7e13f6afcd1f 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -1167,7 +1167,7 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock, msg->msg_flags |= MSG_TRUNC; } - er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + er = skb_copy_datagram_msg(skb, 0, msg, copied); if (er < 
0) { skb_free_datagram(sk, skb); release_sock(sk); diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c index 51f077a92fa9..83bc785d5855 100644 --- a/net/nfc/llcp_sock.c +++ b/net/nfc/llcp_sock.c @@ -832,7 +832,7 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock, copied = min_t(unsigned int, rlen, len); cskb = skb; - if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) { + if (skb_copy_datagram_msg(cskb, 0, msg, copied)) { if (!(flags & MSG_PEEK)) skb_queue_head(&sk->sk_receive_queue, skb); return -EFAULT; diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c index 11c3544ea546..9d7d2b7ba5e4 100644 --- a/net/nfc/rawsock.c +++ b/net/nfc/rawsock.c @@ -269,7 +269,7 @@ static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock, copied = len; } - rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + rc = skb_copy_datagram_msg(skb, 0, msg, copied); skb_free_datagram(sk, skb); diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig index ba3bb8203b99..b7d818c59423 100644 --- a/net/openvswitch/Kconfig +++ b/net/openvswitch/Kconfig @@ -4,7 +4,9 @@ config OPENVSWITCH tristate "Open vSwitch" + depends on INET select LIBCRC32C + select NET_MPLS_GSO ---help--- Open vSwitch is a multilayer Ethernet switch targeted at virtualized environments. In addition to supporting a variety of features @@ -29,11 +31,10 @@ config OPENVSWITCH If unsure, say N. config OPENVSWITCH_GRE - bool "Open vSwitch GRE tunneling support" - depends on INET + tristate "Open vSwitch GRE tunneling support" depends on OPENVSWITCH - depends on NET_IPGRE_DEMUX && !(OPENVSWITCH=y && NET_IPGRE_DEMUX=m) - default y + depends on NET_IPGRE_DEMUX + default OPENVSWITCH ---help--- If you say Y here, then the Open vSwitch will be able create GRE vport. @@ -43,11 +44,10 @@ config OPENVSWITCH_GRE If unsure, say Y. config OPENVSWITCH_VXLAN - bool "Open vSwitch VXLAN tunneling support" - depends on INET + tristate "Open vSwitch VXLAN tunneling support" depends on OPENVSWITCH - depends on VXLAN && !(OPENVSWITCH=y && VXLAN=m) - default y + depends on VXLAN + default OPENVSWITCH ---help--- If you say Y here, then the Open vSwitch will be able create vxlan vport. @@ -56,11 +56,10 @@ config OPENVSWITCH_VXLAN If unsure, say Y. config OPENVSWITCH_GENEVE - bool "Open vSwitch Geneve tunneling support" - depends on INET + tristate "Open vSwitch Geneve tunneling support" depends on OPENVSWITCH - depends on GENEVE && !(OPENVSWITCH=y && GENEVE=m) - default y + depends on GENEVE + default OPENVSWITCH ---help--- If you say Y here, then the Open vSwitch will be able create geneve vport. 
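
The netlink_unbind() hunk above fixes a rollback off-by-one: the undo loop tested the loop bound (group) on every pass instead of the iterator (undo), so it unwound either all of the already-bound groups or none of them, keyed on an unrelated bit; the companion change in netlink_bind() also passes nlk->ngroups rather than nlk->ngroups - 1 so the last group is included in the rollback. A minimal user-space sketch of the corrected pattern, assuming nothing beyond what the hunk shows (undo_groups and drop_group are illustrative names, and a plain shift stands in for test_bit()):

#include <stdio.h>

static void drop_group(int g)
{
	printf("unbound group %d\n", g);
}

/* Roll back the first 'group' subscriptions: each iteration must test
 * its own bit ('undo'), not the loop bound ('group') as the old code did. */
static void undo_groups(unsigned long groups, int group, void (*unbind)(int))
{
	int undo;

	for (undo = 0; undo < group; undo++)
		if (groups & (1UL << undo))
			unbind(undo);
}

int main(void)
{
	/* groups 0 and 2 were bound before binding group 3 failed */
	undo_groups(0x5UL, 3, drop_group);
	return 0;
}
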
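A large share of the hunks in this section are one mechanical conversion: protocol recvmsg handlers stop dereferencing msg->msg_iov themselves and hand the whole msghdr to skb_copy_datagram_msg(). Passing the container rather than one of its fields is the usual preparation for changing the field's representation without another tree-wide sweep. Below is a compilable sketch of a plausible shape for the new helper; every type and name in it is an invented stand-in, since the helper's real definition is not part of this diff:

/* Stand-in types; the kernel's struct iovec, struct msghdr and
 * struct sk_buff carry far more state than this. */
struct iovec_stub   { void *iov_base; unsigned long iov_len; };
struct msghdr_stub  { struct iovec_stub *msg_iov; };
struct sk_buff_stub { const char *data; unsigned long len; };

/* Old entry point: every caller had to name msg->msg_iov itself. */
static int skb_copy_datagram_iovec_stub(const struct sk_buff_stub *skb,
					int offset, struct iovec_stub *to,
					int len)
{
	(void)skb; (void)offset; (void)to; (void)len;
	return 0;	/* the real helper copies skb data to user space */
}

/* New entry point: takes the msghdr, so the iovec representation inside
 * it can change later without touching the callers converted here. */
static int skb_copy_datagram_msg_stub(const struct sk_buff_stub *skb,
				      int offset, struct msghdr_stub *msg,
				      int len)
{
	return skb_copy_datagram_iovec_stub(skb, offset, msg->msg_iov, len);
}

int main(void)
{
	struct iovec_stub iov = { 0, 0 };
	struct msghdr_stub msg = { &iov };
	struct sk_buff_stub skb = { "", 0 };

	return skb_copy_datagram_msg_stub(&skb, 0, &msg, 0);
}
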
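Earlier in this section, nf_tables_api.c gains the exported helper nft_chain_validate_dependency(), and nft_masq and nft_nat wire it into their init paths and new validate hooks so NAT-only expressions refuse to load outside NAT base chains. A standalone restatement of the helper's logic, using simplified stand-in types (chain, chain_type and the bitfield are illustrative; only base chains carry a type, and non-base chains pass here because they are checked later when the ruleset is walked):

#include <errno.h>

enum chain_type { CHAIN_T_DEFAULT, CHAIN_T_ROUTE, CHAIN_T_NAT };

struct chain {
	unsigned int	base:1;	/* stands in for NFT_BASE_CHAIN */
	enum chain_type	type;	/* meaningful only when base is set */
};

static int chain_validate_dependency(const struct chain *chain,
				     enum chain_type type)
{
	if (chain->base && chain->type != type)
		return -EOPNOTSUPP;
	return 0;
}

int main(void)
{
	struct chain nat_base    = { 1, CHAIN_T_NAT };
	struct chain filter_base = { 1, CHAIN_T_DEFAULT };

	/* expect 0 (ok) and -EOPNOTSUPP respectively */
	return (chain_validate_dependency(&nat_base, CHAIN_T_NAT) == 0 &&
		chain_validate_dependency(&filter_base, CHAIN_T_NAT) ==
		-EOPNOTSUPP) ? 0 : 1;
}
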
diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile index 9a33a273c375..91b9478413ef 100644 --- a/net/openvswitch/Makefile +++ b/net/openvswitch/Makefile @@ -15,14 +15,6 @@ openvswitch-y := \ vport-internal_dev.o \ vport-netdev.o -ifneq ($(CONFIG_OPENVSWITCH_GENEVE),) -openvswitch-y += vport-geneve.o -endif - -ifneq ($(CONFIG_OPENVSWITCH_VXLAN),) -openvswitch-y += vport-vxlan.o -endif - -ifneq ($(CONFIG_OPENVSWITCH_GRE),) -openvswitch-y += vport-gre.o -endif +obj-$(CONFIG_OPENVSWITCH_GENEVE)+= vport-geneve.o +obj-$(CONFIG_OPENVSWITCH_VXLAN) += vport-vxlan.o +obj-$(CONFIG_OPENVSWITCH_GRE) += vport-gre.o diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 006886dbee36..4e05ea1c2d11 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -28,10 +28,12 @@ #include <linux/in6.h> #include <linux/if_arp.h> #include <linux/if_vlan.h> + #include <net/ip.h> #include <net/ipv6.h> #include <net/checksum.h> #include <net/dsfield.h> +#include <net/mpls.h> #include <net/sctp/checksum.h> #include "datapath.h" @@ -67,7 +69,7 @@ static void action_fifo_init(struct action_fifo *fifo) fifo->tail = 0; } -static bool action_fifo_is_empty(struct action_fifo *fifo) +static bool action_fifo_is_empty(const struct action_fifo *fifo) { return (fifo->head == fifo->tail); } @@ -90,7 +92,7 @@ static struct deferred_action *action_fifo_put(struct action_fifo *fifo) /* Return true if fifo is not full */ static struct deferred_action *add_deferred_actions(struct sk_buff *skb, - struct sw_flow_key *key, + const struct sw_flow_key *key, const struct nlattr *attr) { struct action_fifo *fifo; @@ -107,100 +109,131 @@ static struct deferred_action *add_deferred_actions(struct sk_buff *skb, return da; } -static int make_writable(struct sk_buff *skb, int write_len) +static void invalidate_flow_key(struct sw_flow_key *key) +{ + key->eth.type = htons(0); +} + +static bool is_flow_key_valid(const struct sw_flow_key *key) { - if (!pskb_may_pull(skb, write_len)) + return !!key->eth.type; +} + +static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key, + const struct ovs_action_push_mpls *mpls) +{ + __be32 *new_mpls_lse; + struct ethhdr *hdr; + + /* Networking stack do not allow simultaneous Tunnel and MPLS GSO. */ + if (skb->encapsulation) + return -ENOTSUPP; + + if (skb_cow_head(skb, MPLS_HLEN) < 0) return -ENOMEM; - if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) - return 0; + skb_push(skb, MPLS_HLEN); + memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), + skb->mac_len); + skb_reset_mac_header(skb); + + new_mpls_lse = (__be32 *)skb_mpls_header(skb); + *new_mpls_lse = mpls->mpls_lse; - return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->csum = csum_add(skb->csum, csum_partial(new_mpls_lse, + MPLS_HLEN, 0)); + + hdr = eth_hdr(skb); + hdr->h_proto = mpls->mpls_ethertype; + + skb_set_inner_protocol(skb, skb->protocol); + skb->protocol = mpls->mpls_ethertype; + + invalidate_flow_key(key); + return 0; } -/* remove VLAN header from packet and update csum accordingly. 
*/ -static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci) +static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key, + const __be16 ethertype) { - struct vlan_hdr *vhdr; + struct ethhdr *hdr; int err; - err = make_writable(skb, VLAN_ETH_HLEN); + err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); if (unlikely(err)) return err; - if (skb->ip_summed == CHECKSUM_COMPLETE) - skb->csum = csum_sub(skb->csum, csum_partial(skb->data - + (2 * ETH_ALEN), VLAN_HLEN, 0)); + skb_postpull_rcsum(skb, skb_mpls_header(skb), MPLS_HLEN); - vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); - *current_tci = vhdr->h_vlan_TCI; + memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), + skb->mac_len); - memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); - __skb_pull(skb, VLAN_HLEN); + __skb_pull(skb, MPLS_HLEN); + skb_reset_mac_header(skb); - vlan_set_encap_proto(skb, vhdr); - skb->mac_header += VLAN_HLEN; - if (skb_network_offset(skb) < ETH_HLEN) - skb_set_network_header(skb, ETH_HLEN); - skb_reset_mac_len(skb); + /* skb_mpls_header() is used to locate the ethertype + * field correctly in the presence of VLAN tags. + */ + hdr = (struct ethhdr *)(skb_mpls_header(skb) - ETH_HLEN); + hdr->h_proto = ethertype; + if (eth_p_mpls(skb->protocol)) + skb->protocol = ethertype; + invalidate_flow_key(key); return 0; } -static int pop_vlan(struct sk_buff *skb) +static int set_mpls(struct sk_buff *skb, struct sw_flow_key *key, + const __be32 *mpls_lse) { - __be16 tci; + __be32 *stack; int err; - if (likely(vlan_tx_tag_present(skb))) { - skb->vlan_tci = 0; - } else { - if (unlikely(skb->protocol != htons(ETH_P_8021Q) || - skb->len < VLAN_ETH_HLEN)) - return 0; - - err = __pop_vlan_tci(skb, &tci); - if (err) - return err; - } - /* move next vlan tag to hw accel tag */ - if (likely(skb->protocol != htons(ETH_P_8021Q) || - skb->len < VLAN_ETH_HLEN)) - return 0; - - err = __pop_vlan_tci(skb, &tci); + err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); if (unlikely(err)) return err; - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(tci)); + stack = (__be32 *)skb_mpls_header(skb); + if (skb->ip_summed == CHECKSUM_COMPLETE) { + __be32 diff[] = { ~(*stack), *mpls_lse }; + skb->csum = ~csum_partial((char *)diff, sizeof(diff), + ~skb->csum); + } + + *stack = *mpls_lse; + key->mpls.top_lse = *mpls_lse; return 0; } -static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vlan) +static int pop_vlan(struct sk_buff *skb, struct sw_flow_key *key) { - if (unlikely(vlan_tx_tag_present(skb))) { - u16 current_tag; - - /* push down current VLAN tag */ - current_tag = vlan_tx_tag_get(skb); - - if (!__vlan_put_tag(skb, skb->vlan_proto, current_tag)) - return -ENOMEM; + int err; - if (skb->ip_summed == CHECKSUM_COMPLETE) - skb->csum = csum_add(skb->csum, csum_partial(skb->data - + (2 * ETH_ALEN), VLAN_HLEN, 0)); + err = skb_vlan_pop(skb); + if (vlan_tx_tag_present(skb)) + invalidate_flow_key(key); + else + key->eth.tci = 0; + return err; +} - } - __vlan_hwaccel_put_tag(skb, vlan->vlan_tpid, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT); - return 0; +static int push_vlan(struct sk_buff *skb, struct sw_flow_key *key, + const struct ovs_action_push_vlan *vlan) +{ + if (vlan_tx_tag_present(skb)) + invalidate_flow_key(key); + else + key->eth.tci = vlan->vlan_tci; + return skb_vlan_push(skb, vlan->vlan_tpid, + ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT); } -static int set_eth_addr(struct sk_buff *skb, +static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *key, const struct 
ovs_key_ethernet *eth_key) { int err; - err = make_writable(skb, ETH_HLEN); + err = skb_ensure_writable(skb, ETH_HLEN); if (unlikely(err)) return err; @@ -211,11 +244,13 @@ static int set_eth_addr(struct sk_buff *skb, ovs_skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2); + ether_addr_copy(key->eth.src, eth_key->eth_src); + ether_addr_copy(key->eth.dst, eth_key->eth_dst); return 0; } static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh, - __be32 *addr, __be32 new_addr) + __be32 *addr, __be32 new_addr) { int transport_len = skb->len - skb_transport_offset(skb); @@ -294,42 +329,52 @@ static void set_ip_ttl(struct sk_buff *skb, struct iphdr *nh, u8 new_ttl) nh->ttl = new_ttl; } -static int set_ipv4(struct sk_buff *skb, const struct ovs_key_ipv4 *ipv4_key) +static int set_ipv4(struct sk_buff *skb, struct sw_flow_key *key, + const struct ovs_key_ipv4 *ipv4_key) { struct iphdr *nh; int err; - err = make_writable(skb, skb_network_offset(skb) + - sizeof(struct iphdr)); + err = skb_ensure_writable(skb, skb_network_offset(skb) + + sizeof(struct iphdr)); if (unlikely(err)) return err; nh = ip_hdr(skb); - if (ipv4_key->ipv4_src != nh->saddr) + if (ipv4_key->ipv4_src != nh->saddr) { set_ip_addr(skb, nh, &nh->saddr, ipv4_key->ipv4_src); + key->ipv4.addr.src = ipv4_key->ipv4_src; + } - if (ipv4_key->ipv4_dst != nh->daddr) + if (ipv4_key->ipv4_dst != nh->daddr) { set_ip_addr(skb, nh, &nh->daddr, ipv4_key->ipv4_dst); + key->ipv4.addr.dst = ipv4_key->ipv4_dst; + } - if (ipv4_key->ipv4_tos != nh->tos) + if (ipv4_key->ipv4_tos != nh->tos) { ipv4_change_dsfield(nh, 0, ipv4_key->ipv4_tos); + key->ip.tos = nh->tos; + } - if (ipv4_key->ipv4_ttl != nh->ttl) + if (ipv4_key->ipv4_ttl != nh->ttl) { set_ip_ttl(skb, nh, ipv4_key->ipv4_ttl); + key->ip.ttl = ipv4_key->ipv4_ttl; + } return 0; } -static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key) +static int set_ipv6(struct sk_buff *skb, struct sw_flow_key *key, + const struct ovs_key_ipv6 *ipv6_key) { struct ipv6hdr *nh; int err; __be32 *saddr; __be32 *daddr; - err = make_writable(skb, skb_network_offset(skb) + - sizeof(struct ipv6hdr)); + err = skb_ensure_writable(skb, skb_network_offset(skb) + + sizeof(struct ipv6hdr)); if (unlikely(err)) return err; @@ -337,9 +382,12 @@ static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key) saddr = (__be32 *)&nh->saddr; daddr = (__be32 *)&nh->daddr; - if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src))) + if (memcmp(ipv6_key->ipv6_src, saddr, sizeof(ipv6_key->ipv6_src))) { set_ipv6_addr(skb, ipv6_key->ipv6_proto, saddr, ipv6_key->ipv6_src, true); + memcpy(&key->ipv6.addr.src, ipv6_key->ipv6_src, + sizeof(ipv6_key->ipv6_src)); + } if (memcmp(ipv6_key->ipv6_dst, daddr, sizeof(ipv6_key->ipv6_dst))) { unsigned int offset = 0; @@ -353,16 +401,22 @@ static int set_ipv6(struct sk_buff *skb, const struct ovs_key_ipv6 *ipv6_key) set_ipv6_addr(skb, ipv6_key->ipv6_proto, daddr, ipv6_key->ipv6_dst, recalc_csum); + memcpy(&key->ipv6.addr.dst, ipv6_key->ipv6_dst, + sizeof(ipv6_key->ipv6_dst)); } set_ipv6_tc(nh, ipv6_key->ipv6_tclass); + key->ip.tos = ipv6_get_dsfield(nh); + set_ipv6_fl(nh, ntohl(ipv6_key->ipv6_label)); - nh->hop_limit = ipv6_key->ipv6_hlimit; + key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL); + nh->hop_limit = ipv6_key->ipv6_hlimit; + key->ip.ttl = ipv6_key->ipv6_hlimit; return 0; } -/* Must follow make_writable() since that can move the skb data. */ +/* Must follow skb_ensure_writable() since that can move the skb data. 
*/ static void set_tp_port(struct sk_buff *skb, __be16 *port, __be16 new_port, __sum16 *check) { @@ -386,54 +440,64 @@ static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port) } } -static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key) +static int set_udp(struct sk_buff *skb, struct sw_flow_key *key, + const struct ovs_key_udp *udp_port_key) { struct udphdr *uh; int err; - err = make_writable(skb, skb_transport_offset(skb) + - sizeof(struct udphdr)); + err = skb_ensure_writable(skb, skb_transport_offset(skb) + + sizeof(struct udphdr)); if (unlikely(err)) return err; uh = udp_hdr(skb); - if (udp_port_key->udp_src != uh->source) + if (udp_port_key->udp_src != uh->source) { set_udp_port(skb, &uh->source, udp_port_key->udp_src); + key->tp.src = udp_port_key->udp_src; + } - if (udp_port_key->udp_dst != uh->dest) + if (udp_port_key->udp_dst != uh->dest) { set_udp_port(skb, &uh->dest, udp_port_key->udp_dst); + key->tp.dst = udp_port_key->udp_dst; + } return 0; } -static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key) +static int set_tcp(struct sk_buff *skb, struct sw_flow_key *key, + const struct ovs_key_tcp *tcp_port_key) { struct tcphdr *th; int err; - err = make_writable(skb, skb_transport_offset(skb) + - sizeof(struct tcphdr)); + err = skb_ensure_writable(skb, skb_transport_offset(skb) + + sizeof(struct tcphdr)); if (unlikely(err)) return err; th = tcp_hdr(skb); - if (tcp_port_key->tcp_src != th->source) + if (tcp_port_key->tcp_src != th->source) { set_tp_port(skb, &th->source, tcp_port_key->tcp_src, &th->check); + key->tp.src = tcp_port_key->tcp_src; + } - if (tcp_port_key->tcp_dst != th->dest) + if (tcp_port_key->tcp_dst != th->dest) { set_tp_port(skb, &th->dest, tcp_port_key->tcp_dst, &th->check); + key->tp.dst = tcp_port_key->tcp_dst; + } return 0; } -static int set_sctp(struct sk_buff *skb, - const struct ovs_key_sctp *sctp_port_key) +static int set_sctp(struct sk_buff *skb, struct sw_flow_key *key, + const struct ovs_key_sctp *sctp_port_key) { struct sctphdr *sh; int err; unsigned int sctphoff = skb_transport_offset(skb); - err = make_writable(skb, sctphoff + sizeof(struct sctphdr)); + err = skb_ensure_writable(skb, sctphoff + sizeof(struct sctphdr)); if (unlikely(err)) return err; @@ -454,39 +518,35 @@ static int set_sctp(struct sk_buff *skb, sh->checksum = old_csum ^ old_correct_csum ^ new_csum; skb_clear_hash(skb); + key->tp.src = sctp_port_key->sctp_src; + key->tp.dst = sctp_port_key->sctp_dst; } return 0; } -static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port) +static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port) { - struct vport *vport; + struct vport *vport = ovs_vport_rcu(dp, out_port); - if (unlikely(!skb)) - return -ENOMEM; - - vport = ovs_vport_rcu(dp, out_port); - if (unlikely(!vport)) { + if (likely(vport)) + ovs_vport_send(vport, skb); + else kfree_skb(skb); - return -ENODEV; - } - - ovs_vport_send(vport, skb); - return 0; } static int output_userspace(struct datapath *dp, struct sk_buff *skb, struct sw_flow_key *key, const struct nlattr *attr) { + struct ovs_tunnel_info info; struct dp_upcall_info upcall; const struct nlattr *a; int rem; upcall.cmd = OVS_PACKET_CMD_ACTION; - upcall.key = key; upcall.userdata = NULL; upcall.portid = 0; + upcall.egress_tun_info = NULL; for (a = nla_data(attr), rem = nla_len(attr); rem > 0; a = nla_next(a, &rem)) { @@ -498,15 +558,27 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb, case 
OVS_USERSPACE_ATTR_PID: upcall.portid = nla_get_u32(a); break; + + case OVS_USERSPACE_ATTR_EGRESS_TUN_PORT: { + /* Get out tunnel info. */ + struct vport *vport; + + vport = ovs_vport_rcu(dp, nla_get_u32(a)); + if (vport) { + int err; + + err = ovs_vport_get_egress_tun_info(vport, skb, + &info); + if (!err) + upcall.egress_tun_info = &info; + } + break; } - } - return ovs_dp_upcall(dp, skb, &upcall); -} + } /* End of switch. */ + } -static bool last_action(const struct nlattr *a, int rem) -{ - return a->nla_len == rem; + return ovs_dp_upcall(dp, skb, key, &upcall); } static int sample(struct datapath *dp, struct sk_buff *skb, @@ -543,7 +615,7 @@ static int sample(struct datapath *dp, struct sk_buff *skb, * user space. This skb will be consumed by its caller. */ if (likely(nla_type(a) == OVS_ACTION_ATTR_USERSPACE && - last_action(a, rem))) + nla_is_last(a, rem))) return output_userspace(dp, skb, key, a); skb = skb_clone(skb, GFP_ATOMIC); @@ -576,18 +648,20 @@ static void execute_hash(struct sk_buff *skb, struct sw_flow_key *key, key->ovs_flow_hash = hash; } -static int execute_set_action(struct sk_buff *skb, - const struct nlattr *nested_attr) +static int execute_set_action(struct sk_buff *skb, struct sw_flow_key *key, + const struct nlattr *nested_attr) { int err = 0; switch (nla_type(nested_attr)) { case OVS_KEY_ATTR_PRIORITY: skb->priority = nla_get_u32(nested_attr); + key->phy.priority = skb->priority; break; case OVS_KEY_ATTR_SKB_MARK: skb->mark = nla_get_u32(nested_attr); + key->phy.skb_mark = skb->mark; break; case OVS_KEY_ATTR_TUNNEL_INFO: @@ -595,27 +669,31 @@ static int execute_set_action(struct sk_buff *skb, break; case OVS_KEY_ATTR_ETHERNET: - err = set_eth_addr(skb, nla_data(nested_attr)); + err = set_eth_addr(skb, key, nla_data(nested_attr)); break; case OVS_KEY_ATTR_IPV4: - err = set_ipv4(skb, nla_data(nested_attr)); + err = set_ipv4(skb, key, nla_data(nested_attr)); break; case OVS_KEY_ATTR_IPV6: - err = set_ipv6(skb, nla_data(nested_attr)); + err = set_ipv6(skb, key, nla_data(nested_attr)); break; case OVS_KEY_ATTR_TCP: - err = set_tcp(skb, nla_data(nested_attr)); + err = set_tcp(skb, key, nla_data(nested_attr)); break; case OVS_KEY_ATTR_UDP: - err = set_udp(skb, nla_data(nested_attr)); + err = set_udp(skb, key, nla_data(nested_attr)); break; case OVS_KEY_ATTR_SCTP: - err = set_sctp(skb, nla_data(nested_attr)); + err = set_sctp(skb, key, nla_data(nested_attr)); + break; + + case OVS_KEY_ATTR_MPLS: + err = set_mpls(skb, key, nla_data(nested_attr)); break; } @@ -627,13 +705,17 @@ static int execute_recirc(struct datapath *dp, struct sk_buff *skb, const struct nlattr *a, int rem) { struct deferred_action *da; - int err; - err = ovs_flow_key_update(skb, key); - if (err) - return err; + if (!is_flow_key_valid(key)) { + int err; + + err = ovs_flow_key_update(skb, key); + if (err) + return err; + } + BUG_ON(!is_flow_key_valid(key)); - if (!last_action(a, rem)) { + if (!nla_is_last(a, rem)) { /* Recirc action is the not the last action * of the action list, need to clone the skb. */ @@ -668,7 +750,8 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, /* Every output action needs a separate clone of 'skb', but the common * case is just a single output action, so that doing a clone and * then freeing the original skbuff is wasteful. So the following code - * is slightly obscure just to avoid that. */ + * is slightly obscure just to avoid that. 
+ */ int prev_port = -1; const struct nlattr *a; int rem; @@ -677,8 +760,12 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, a = nla_next(a, &rem)) { int err = 0; - if (prev_port != -1) { - do_output(dp, skb_clone(skb, GFP_ATOMIC), prev_port); + if (unlikely(prev_port != -1)) { + struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC); + + if (out_skb) + do_output(dp, out_skb, prev_port); + prev_port = -1; } @@ -695,19 +782,25 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, execute_hash(skb, key, a); break; + case OVS_ACTION_ATTR_PUSH_MPLS: + err = push_mpls(skb, key, nla_data(a)); + break; + + case OVS_ACTION_ATTR_POP_MPLS: + err = pop_mpls(skb, key, nla_get_be16(a)); + break; + case OVS_ACTION_ATTR_PUSH_VLAN: - err = push_vlan(skb, nla_data(a)); - if (unlikely(err)) /* skb already freed. */ - return err; + err = push_vlan(skb, key, nla_data(a)); break; case OVS_ACTION_ATTR_POP_VLAN: - err = pop_vlan(skb); + err = pop_vlan(skb, key); break; case OVS_ACTION_ATTR_RECIRC: err = execute_recirc(dp, skb, key, a, rem); - if (last_action(a, rem)) { + if (nla_is_last(a, rem)) { /* If this is the last action, the skb has * been consumed or freed. * Return immediately. @@ -717,7 +810,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, break; case OVS_ACTION_ATTR_SET: - err = execute_set_action(skb, nla_data(a)); + err = execute_set_action(skb, key, nla_data(a)); break; case OVS_ACTION_ATTR_SAMPLE: @@ -769,14 +862,12 @@ static void process_deferred_actions(struct datapath *dp) /* Execute a list of actions against 'skb'. */ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb, + const struct sw_flow_actions *acts, struct sw_flow_key *key) { int level = this_cpu_read(exec_actions_level); - struct sw_flow_actions *acts; int err; - acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts); - this_cpu_inc(exec_actions_level); OVS_CB(skb)->egress_tun_info = NULL; err = do_execute_actions(dp, skb, key, diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 2e31d9e7f4dc..f37ca3e5824c 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -59,6 +59,7 @@ #include "vport-netdev.h" int ovs_net_id __read_mostly; +EXPORT_SYMBOL_GPL(ovs_net_id); static struct genl_family dp_packet_genl_family; static struct genl_family dp_flow_genl_family; @@ -130,27 +131,41 @@ int lockdep_ovsl_is_held(void) else return 1; } +EXPORT_SYMBOL_GPL(lockdep_ovsl_is_held); #endif static struct vport *new_vport(const struct vport_parms *); static int queue_gso_packets(struct datapath *dp, struct sk_buff *, + const struct sw_flow_key *, const struct dp_upcall_info *); static int queue_userspace_packet(struct datapath *dp, struct sk_buff *, + const struct sw_flow_key *, const struct dp_upcall_info *); -/* Must be called with rcu_read_lock or ovs_mutex. */ -static struct datapath *get_dp(struct net *net, int dp_ifindex) +/* Must be called with rcu_read_lock. */ +static struct datapath *get_dp_rcu(struct net *net, int dp_ifindex) { - struct datapath *dp = NULL; - struct net_device *dev; + struct net_device *dev = dev_get_by_index_rcu(net, dp_ifindex); - rcu_read_lock(); - dev = dev_get_by_index_rcu(net, dp_ifindex); if (dev) { struct vport *vport = ovs_internal_dev_get_vport(dev); if (vport) - dp = vport->dp; + return vport->dp; } + + return NULL; +} + +/* The caller must hold either ovs_mutex or rcu_read_lock to keep the + * returned dp pointer valid. 
+ */ +static inline struct datapath *get_dp(struct net *net, int dp_ifindex) +{ + struct datapath *dp; + + WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_ovsl_is_held()); + rcu_read_lock(); + dp = get_dp_rcu(net, dp_ifindex); rcu_read_unlock(); return dp; @@ -163,7 +178,7 @@ const char *ovs_dp_name(const struct datapath *dp) return vport->ops->get_name(vport); } -static int get_dpifindex(struct datapath *dp) +static int get_dpifindex(const struct datapath *dp) { struct vport *local; int ifindex; @@ -185,6 +200,7 @@ static void destroy_dp_rcu(struct rcu_head *rcu) { struct datapath *dp = container_of(rcu, struct datapath, rcu); + ovs_flow_tbl_destroy(&dp->table); free_percpu(dp->stats_percpu); release_net(ovs_dp_get_net(dp)); kfree(dp->ports); @@ -243,6 +259,7 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key) const struct vport *p = OVS_CB(skb)->input_vport; struct datapath *dp = p->dp; struct sw_flow *flow; + struct sw_flow_actions *sf_acts; struct dp_stats_percpu *stats; u64 *stats_counter; u32 n_mask_hit; @@ -256,10 +273,10 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key) int error; upcall.cmd = OVS_PACKET_CMD_MISS; - upcall.key = key; upcall.userdata = NULL; upcall.portid = ovs_vport_find_upcall_portid(p, skb); - error = ovs_dp_upcall(dp, skb, &upcall); + upcall.egress_tun_info = NULL; + error = ovs_dp_upcall(dp, skb, key, &upcall); if (unlikely(error)) kfree_skb(skb); else @@ -268,10 +285,10 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key) goto out; } - OVS_CB(skb)->flow = flow; + ovs_flow_stats_update(flow, key->tp.flags, skb); + sf_acts = rcu_dereference(flow->sf_acts); + ovs_execute_actions(dp, skb, sf_acts, key); - ovs_flow_stats_update(OVS_CB(skb)->flow, key->tp.flags, skb); - ovs_execute_actions(dp, skb, key); stats_counter = &stats->n_hit; out: @@ -283,6 +300,7 @@ out: } int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb, + const struct sw_flow_key *key, const struct dp_upcall_info *upcall_info) { struct dp_stats_percpu *stats; @@ -294,9 +312,9 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb, } if (!skb_is_gso(skb)) - err = queue_userspace_packet(dp, skb, upcall_info); + err = queue_userspace_packet(dp, skb, key, upcall_info); else - err = queue_gso_packets(dp, skb, upcall_info); + err = queue_gso_packets(dp, skb, key, upcall_info); if (err) goto err; @@ -313,37 +331,43 @@ err: } static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb, + const struct sw_flow_key *key, const struct dp_upcall_info *upcall_info) { unsigned short gso_type = skb_shinfo(skb)->gso_type; - struct dp_upcall_info later_info; struct sw_flow_key later_key; struct sk_buff *segs, *nskb; + struct ovs_skb_cb ovs_cb; int err; + ovs_cb = *OVS_CB(skb); segs = __skb_gso_segment(skb, NETIF_F_SG, false); + *OVS_CB(skb) = ovs_cb; if (IS_ERR(segs)) return PTR_ERR(segs); + if (segs == NULL) + return -EINVAL; + + if (gso_type & SKB_GSO_UDP) { + /* The initial flow key extracted by ovs_flow_key_extract() + * in this case is for a first fragment, so we need to + * properly mark later fragments. + */ + later_key = *key; + later_key.ip.frag = OVS_FRAG_TYPE_LATER; + } /* Queue all of the segments. 
*/ skb = segs; do { - err = queue_userspace_packet(dp, skb, upcall_info); + *OVS_CB(skb) = ovs_cb; + if (gso_type & SKB_GSO_UDP && skb != segs) + key = &later_key; + + err = queue_userspace_packet(dp, skb, key, upcall_info); if (err) break; - if (skb == segs && gso_type & SKB_GSO_UDP) { - /* The initial flow key extracted by ovs_flow_extract() - * in this case is for a first fragment, so we need to - * properly mark later fragments. - */ - later_key = *upcall_info->key; - later_key.ip.frag = OVS_FRAG_TYPE_LATER; - - later_info = *upcall_info; - later_info.key = &later_key; - upcall_info = &later_info; - } } while ((skb = skb->next)); /* Free all of the segments. */ @@ -358,46 +382,26 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb, return err; } -static size_t key_attr_size(void) -{ - return nla_total_size(4) /* OVS_KEY_ATTR_PRIORITY */ - + nla_total_size(0) /* OVS_KEY_ATTR_TUNNEL */ - + nla_total_size(8) /* OVS_TUNNEL_KEY_ATTR_ID */ - + nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */ - + nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */ - + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TOS */ - + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TTL */ - + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */ - + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_CSUM */ - + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_OAM */ - + nla_total_size(256) /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */ - + nla_total_size(4) /* OVS_KEY_ATTR_IN_PORT */ - + nla_total_size(4) /* OVS_KEY_ATTR_SKB_MARK */ - + nla_total_size(12) /* OVS_KEY_ATTR_ETHERNET */ - + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */ - + nla_total_size(4) /* OVS_KEY_ATTR_8021Q */ - + nla_total_size(0) /* OVS_KEY_ATTR_ENCAP */ - + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */ - + nla_total_size(40) /* OVS_KEY_ATTR_IPV6 */ - + nla_total_size(2) /* OVS_KEY_ATTR_ICMPV6 */ - + nla_total_size(28); /* OVS_KEY_ATTR_ND */ -} - -static size_t upcall_msg_size(const struct nlattr *userdata, +static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info, unsigned int hdrlen) { size_t size = NLMSG_ALIGN(sizeof(struct ovs_header)) + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */ - + nla_total_size(key_attr_size()); /* OVS_PACKET_ATTR_KEY */ + + nla_total_size(ovs_key_attr_size()); /* OVS_PACKET_ATTR_KEY */ /* OVS_PACKET_ATTR_USERDATA */ - if (userdata) - size += NLA_ALIGN(userdata->nla_len); + if (upcall_info->userdata) + size += NLA_ALIGN(upcall_info->userdata->nla_len); + + /* OVS_PACKET_ATTR_EGRESS_TUN_KEY */ + if (upcall_info->egress_tun_info) + size += nla_total_size(ovs_tun_key_attr_size()); return size; } static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, + const struct sw_flow_key *key, const struct dp_upcall_info *upcall_info) { struct ovs_header *upcall; @@ -421,11 +425,10 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, if (!nskb) return -ENOMEM; - nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb)); + nskb = __vlan_hwaccel_push_inside(nskb); if (!nskb) return -ENOMEM; - nskb->vlan_tci = 0; skb = nskb; } @@ -448,7 +451,7 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, else hlen = skb->len; - len = upcall_msg_size(upcall_info->userdata, hlen); + len = upcall_msg_size(upcall_info, hlen); user_skb = genlmsg_new_unicast(len, &info, GFP_ATOMIC); if (!user_skb) { err = -ENOMEM; @@ -460,7 +463,7 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, upcall->dp_ifindex = dp_ifindex; nla = 
nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY); - err = ovs_nla_put_flow(upcall_info->key, upcall_info->key, user_skb); + err = ovs_nla_put_flow(key, key, user_skb); BUG_ON(err); nla_nest_end(user_skb, nla); @@ -469,6 +472,14 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, nla_len(upcall_info->userdata), nla_data(upcall_info->userdata)); + if (upcall_info->egress_tun_info) { + nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY); + err = ovs_nla_put_egress_tunnel_key(user_skb, + upcall_info->egress_tun_info); + BUG_ON(err); + nla_nest_end(user_skb, nla); + } + /* Only reserve room for attribute header, packet data is added * in skb_zerocopy() */ if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) { @@ -508,11 +519,13 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) struct sw_flow_actions *acts; struct sk_buff *packet; struct sw_flow *flow; + struct sw_flow_actions *sf_acts; struct datapath *dp; struct ethhdr *eth; struct vport *input_vport; int len; int err; + bool log = !a[OVS_FLOW_ATTR_PROBE]; err = -EINVAL; if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] || @@ -546,29 +559,22 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) goto err_kfree_skb; err = ovs_flow_key_extract_userspace(a[OVS_PACKET_ATTR_KEY], packet, - &flow->key); + &flow->key, log); if (err) goto err_flow_free; - acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS])); - err = PTR_ERR(acts); - if (IS_ERR(acts)) - goto err_flow_free; - err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS], - &flow->key, 0, &acts); + &flow->key, &acts, log); if (err) goto err_flow_free; rcu_assign_pointer(flow->sf_acts, acts); - OVS_CB(packet)->egress_tun_info = NULL; - OVS_CB(packet)->flow = flow; packet->priority = flow->key.phy.priority; packet->mark = flow->key.phy.skb_mark; rcu_read_lock(); - dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); + dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex); err = -ENODEV; if (!dp) goto err_unlock; @@ -581,9 +587,10 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) goto err_unlock; OVS_CB(packet)->input_vport = input_vport; + sf_acts = rcu_dereference(flow->sf_acts); local_bh_disable(); - err = ovs_execute_actions(dp, packet, &flow->key); + err = ovs_execute_actions(dp, packet, sf_acts, &flow->key); local_bh_enable(); rcu_read_unlock(); @@ -626,7 +633,7 @@ static struct genl_family dp_packet_genl_family = { .n_ops = ARRAY_SIZE(dp_packet_genl_ops), }; -static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats, +static void get_dp_stats(const struct datapath *dp, struct ovs_dp_stats *stats, struct ovs_dp_megaflow_stats *mega_stats) { int i; @@ -660,8 +667,8 @@ static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats, static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts) { return NLMSG_ALIGN(sizeof(struct ovs_header)) - + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */ - + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_MASK */ + + nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_KEY */ + + nla_total_size(ovs_key_attr_size()) /* OVS_FLOW_ATTR_MASK */ + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */ + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */ + nla_total_size(8) /* OVS_FLOW_ATTR_USED */ @@ -669,58 +676,67 @@ static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts) } /* Called with ovs_mutex or RCU read lock. 
*/ -static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex, - struct sk_buff *skb, u32 portid, - u32 seq, u32 flags, u8 cmd) +static int ovs_flow_cmd_fill_match(const struct sw_flow *flow, + struct sk_buff *skb) { - const int skb_orig_len = skb->len; - struct nlattr *start; - struct ovs_flow_stats stats; - __be16 tcp_flags; - unsigned long used; - struct ovs_header *ovs_header; struct nlattr *nla; int err; - ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd); - if (!ovs_header) - return -EMSGSIZE; - - ovs_header->dp_ifindex = dp_ifindex; - /* Fill flow key. */ nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY); if (!nla) - goto nla_put_failure; + return -EMSGSIZE; err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb); if (err) - goto error; + return err; + nla_nest_end(skb, nla); + /* Fill flow mask. */ nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK); if (!nla) - goto nla_put_failure; + return -EMSGSIZE; err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb); if (err) - goto error; + return err; nla_nest_end(skb, nla); + return 0; +} + +/* Called with ovs_mutex or RCU read lock. */ +static int ovs_flow_cmd_fill_stats(const struct sw_flow *flow, + struct sk_buff *skb) +{ + struct ovs_flow_stats stats; + __be16 tcp_flags; + unsigned long used; ovs_flow_stats_get(flow, &stats, &used, &tcp_flags); if (used && nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used))) - goto nla_put_failure; + return -EMSGSIZE; if (stats.n_packets && nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats)) - goto nla_put_failure; + return -EMSGSIZE; if ((u8)ntohs(tcp_flags) && nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags))) - goto nla_put_failure; + return -EMSGSIZE; + + return 0; +} + +/* Called with ovs_mutex or RCU read lock. */ +static int ovs_flow_cmd_fill_actions(const struct sw_flow *flow, + struct sk_buff *skb, int skb_orig_len) +{ + struct nlattr *start; + int err; /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if * this is the first flow to be dumped into 'skb'. This is unusual for @@ -744,17 +760,47 @@ static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex, nla_nest_end(skb, start); else { if (skb_orig_len) - goto error; + return err; nla_nest_cancel(skb, start); } - } else if (skb_orig_len) - goto nla_put_failure; + } else if (skb_orig_len) { + return -EMSGSIZE; + } + + return 0; +} + +/* Called with ovs_mutex or RCU read lock. */ +static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex, + struct sk_buff *skb, u32 portid, + u32 seq, u32 flags, u8 cmd) +{ + const int skb_orig_len = skb->len; + struct ovs_header *ovs_header; + int err; + + ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, + flags, cmd); + if (!ovs_header) + return -EMSGSIZE; + + ovs_header->dp_ifindex = dp_ifindex; + + err = ovs_flow_cmd_fill_match(flow, skb); + if (err) + goto error; + + err = ovs_flow_cmd_fill_stats(flow, skb); + if (err) + goto error; + + err = ovs_flow_cmd_fill_actions(flow, skb, skb_orig_len); + if (err) + goto error; return genlmsg_end(skb, ovs_header); -nla_put_failure: - err = -EMSGSIZE; error: genlmsg_cancel(skb, ovs_header); return err; @@ -809,13 +855,18 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) struct sw_flow_actions *acts; struct sw_flow_match match; int error; + bool log = !a[OVS_FLOW_ATTR_PROBE]; /* Must have key and actions. 
*/ error = -EINVAL; - if (!a[OVS_FLOW_ATTR_KEY]) + if (!a[OVS_FLOW_ATTR_KEY]) { + OVS_NLERR(log, "Flow key attr not present in new flow."); goto error; - if (!a[OVS_FLOW_ATTR_ACTIONS]) + } + if (!a[OVS_FLOW_ATTR_ACTIONS]) { + OVS_NLERR(log, "Flow actions attr not present in new flow."); goto error; + } /* Most of the time we need to allocate a new flow, do it before * locking. @@ -828,24 +879,19 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) /* Extract key. */ ovs_match_init(&match, &new_flow->unmasked_key, &mask); - error = ovs_nla_get_match(&match, - a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]); + error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], + a[OVS_FLOW_ATTR_MASK], log); if (error) goto err_kfree_flow; ovs_flow_mask_key(&new_flow->key, &new_flow->unmasked_key, &mask); /* Validate actions. */ - acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS])); - error = PTR_ERR(acts); - if (IS_ERR(acts)) - goto err_kfree_flow; - error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key, - 0, &acts); + &acts, log); if (error) { - OVS_NLERR("Flow actions may not be safe on all matching packets.\n"); - goto err_kfree_acts; + OVS_NLERR(log, "Flow actions may not be safe on all matching packets."); + goto err_kfree_flow; } reply = ovs_flow_cmd_alloc_info(acts, info, false); @@ -897,6 +943,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) } /* The unmasked key has to be the same for flow updates. */ if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) { + /* Look for any overlapping flow. */ flow = ovs_flow_tbl_lookup_exact(&dp->table, &match); if (!flow) { error = -ENOENT; @@ -936,23 +983,21 @@ error: return error; } +/* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */ static struct sw_flow_actions *get_flow_actions(const struct nlattr *a, const struct sw_flow_key *key, - const struct sw_flow_mask *mask) + const struct sw_flow_mask *mask, + bool log) { struct sw_flow_actions *acts; struct sw_flow_key masked_key; int error; - acts = ovs_nla_alloc_flow_actions(nla_len(a)); - if (IS_ERR(acts)) - return acts; - ovs_flow_mask_key(&masked_key, key, mask); - error = ovs_nla_copy_actions(a, &masked_key, 0, &acts); + error = ovs_nla_copy_actions(a, &masked_key, &acts, log); if (error) { - OVS_NLERR("Flow actions may not be safe on all matching packets.\n"); - kfree(acts); + OVS_NLERR(log, + "Actions may not be safe on all matching packets"); return ERR_PTR(error); } @@ -971,29 +1016,31 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info) struct sw_flow_actions *old_acts = NULL, *acts = NULL; struct sw_flow_match match; int error; + bool log = !a[OVS_FLOW_ATTR_PROBE]; /* Extract key. */ error = -EINVAL; - if (!a[OVS_FLOW_ATTR_KEY]) + if (!a[OVS_FLOW_ATTR_KEY]) { + OVS_NLERR(log, "Flow key attribute not present in set flow."); goto error; + } ovs_match_init(&match, &key, &mask); - error = ovs_nla_get_match(&match, - a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]); + error = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], + a[OVS_FLOW_ATTR_MASK], log); if (error) goto error; /* Validate actions. */ if (a[OVS_FLOW_ATTR_ACTIONS]) { - acts = get_flow_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, &mask); + acts = get_flow_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, &mask, + log); if (IS_ERR(acts)) { error = PTR_ERR(acts); goto error; } - } - /* Can allocate before locking if have acts. */ - if (acts) { + /* Can allocate before locking if have acts. 
*/ reply = ovs_flow_cmd_alloc_info(acts, info, false); if (IS_ERR(reply)) { error = PTR_ERR(reply); @@ -1068,14 +1115,16 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info) struct datapath *dp; struct sw_flow_match match; int err; + bool log = !a[OVS_FLOW_ATTR_PROBE]; if (!a[OVS_FLOW_ATTR_KEY]) { - OVS_NLERR("Flow get message rejected, Key attribute missing.\n"); + OVS_NLERR(log, + "Flow get message rejected, Key attribute missing."); return -EINVAL; } ovs_match_init(&match, &key, NULL); - err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL); + err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL, log); if (err) return err; @@ -1116,10 +1165,12 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info) struct datapath *dp; struct sw_flow_match match; int err; + bool log = !a[OVS_FLOW_ATTR_PROBE]; if (likely(a[OVS_FLOW_ATTR_KEY])) { ovs_match_init(&match, &key, NULL); - err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL); + err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL, + log); if (unlikely(err)) return err; } @@ -1177,7 +1228,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) struct datapath *dp; rcu_read_lock(); - dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); + dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex); if (!dp) { rcu_read_unlock(); return -ENODEV; @@ -1209,8 +1260,10 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = { [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED }, + [OVS_FLOW_ATTR_MASK] = { .type = NLA_NESTED }, [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED }, [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG }, + [OVS_FLOW_ATTR_PROBE] = { .type = NLA_FLAG }, }; static const struct genl_ops dp_flow_genl_ops[] = { @@ -1311,7 +1364,7 @@ static struct sk_buff *ovs_dp_cmd_alloc_info(struct genl_info *info) /* Called with rcu_read_lock or ovs_mutex. */ static struct datapath *lookup_datapath(struct net *net, - struct ovs_header *ovs_header, + const struct ovs_header *ovs_header, struct nlattr *a[OVS_DP_ATTR_MAX + 1]) { struct datapath *dp; @@ -1339,7 +1392,7 @@ static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *in dp->user_features = 0; } -static void ovs_dp_change(struct datapath *dp, struct nlattr **a) +static void ovs_dp_change(struct datapath *dp, struct nlattr *a[]) { if (a[OVS_DP_ATTR_USER_FEATURES]) dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]); @@ -1440,7 +1493,7 @@ err_destroy_ports_array: err_destroy_percpu: free_percpu(dp->stats_percpu); err_destroy_table: - ovs_flow_tbl_destroy(&dp->table, false); + ovs_flow_tbl_destroy(&dp->table); err_free_dp: release_net(ovs_dp_get_net(dp)); kfree(dp); @@ -1472,8 +1525,6 @@ static void __dp_destroy(struct datapath *dp) ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL)); /* RCU destroy the flow table */ - ovs_flow_tbl_destroy(&dp->table, true); - call_rcu(&dp->rcu, destroy_dp_rcu); } @@ -1705,7 +1756,7 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid, /* Called with ovs_mutex or RCU read lock. 
*/ static struct vport *lookup_vport(struct net *net, - struct ovs_header *ovs_header, + const struct ovs_header *ovs_header, struct nlattr *a[OVS_VPORT_ATTR_MAX + 1]) { struct datapath *dp; @@ -1762,6 +1813,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info) return -ENOMEM; ovs_lock(); +restart: dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); err = -ENODEV; if (!dp) @@ -1793,8 +1845,11 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info) vport = new_vport(&parms); err = PTR_ERR(vport); - if (IS_ERR(vport)) + if (IS_ERR(vport)) { + if (err == -EAGAIN) + goto restart; goto exit_unlock_free; + } err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid, info->snd_seq, 0, OVS_VPORT_CMD_NEW); @@ -1937,7 +1992,7 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb) int i, j = 0; rcu_read_lock(); - dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex); + dp = get_dp_rcu(sock_net(skb->sk), ovs_header->dp_ifindex); if (!dp) { rcu_read_unlock(); return -ENODEV; @@ -2110,12 +2165,18 @@ static int __init dp_init(void) if (err) goto error_netns_exit; + err = ovs_netdev_init(); + if (err) + goto error_unreg_notifier; + err = dp_register_genl(); if (err < 0) - goto error_unreg_notifier; + goto error_unreg_netdev; return 0; +error_unreg_netdev: + ovs_netdev_exit(); error_unreg_notifier: unregister_netdevice_notifier(&ovs_dp_device_notifier); error_netns_exit: @@ -2135,6 +2196,7 @@ error: static void dp_cleanup(void) { dp_unregister_genl(ARRAY_SIZE(dp_genl_families)); + ovs_netdev_exit(); unregister_netdevice_notifier(&ovs_dp_device_notifier); unregister_pernet_device(&ovs_net_ops); rcu_barrier(); diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h index 974135439c5c..3ece94563079 100644 --- a/net/openvswitch/datapath.h +++ b/net/openvswitch/datapath.h @@ -94,14 +94,12 @@ struct datapath { /** * struct ovs_skb_cb - OVS data in skb CB - * @flow: The flow associated with this packet. May be %NULL if no flow. * @egress_tun_key: Tunnel information about this packet on egress path. * NULL if the packet is not being tunneled. * @input_vport: The original vport packet came in on. This value is cached * when a packet is received by OVS. */ struct ovs_skb_cb { - struct sw_flow *flow; struct ovs_tunnel_info *egress_tun_info; struct vport *input_vport; }; @@ -110,18 +108,18 @@ struct ovs_skb_cb { /** * struct dp_upcall - metadata to include with a packet to send to userspace * @cmd: One of %OVS_PACKET_CMD_*. - * @key: Becomes %OVS_PACKET_ATTR_KEY. Must be nonnull. * @userdata: If nonnull, its variable-length value is passed to userspace as * %OVS_PACKET_ATTR_USERDATA. - * @pid: Netlink PID to which packet should be sent. If @pid is 0 then no - * packet is sent and the packet is accounted in the datapath's @n_lost + * @portid: Netlink portid to which packet should be sent. If @portid is 0 + * then no packet is sent and the packet is accounted in the datapath's @n_lost * counter. + * @egress_tun_info: If nonnull, becomes %OVS_PACKET_ATTR_EGRESS_TUN_KEY. 
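One detail worth calling out in the dp_upcall_info rework shown next: the u8 cmd member moves from the front of the struct to the back, behind the pointers and the u32 portid. On LP64 targets this ordering presumably avoids seven bytes of padding after cmd. A stand-alone sketch of the effect (hypothetical stand-in types for the real members, LP64 layout assumed):

#include <stdio.h>

/* 'before': the leading u8 forces 7 bytes of padding ahead of the pointers. */
struct upcall_before {
	unsigned char cmd;
	const void *key;
	const void *userdata;
	unsigned int portid;
};

/* 'after': pointers first, then the u32, then the lone u8 at the tail. */
struct upcall_after {
	const void *egress_tun_info;
	const void *userdata;
	unsigned int portid;
	unsigned char cmd;
};

int main(void)
{
	printf("before: %zu bytes, after: %zu bytes\n",
	       sizeof(struct upcall_before), sizeof(struct upcall_after));
	return 0;	/* typically prints 32 vs 24 on LP64 */
}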
*/ struct dp_upcall_info { - u8 cmd; - const struct sw_flow_key *key; + const struct ovs_tunnel_info *egress_tun_info; const struct nlattr *userdata; u32 portid; + u8 cmd; }; /** @@ -151,7 +149,7 @@ int lockdep_ovsl_is_held(void); #define rcu_dereference_ovsl(p) \ rcu_dereference_check(p, lockdep_ovsl_is_held()) -static inline struct net *ovs_dp_get_net(struct datapath *dp) +static inline struct net *ovs_dp_get_net(const struct datapath *dp) { return read_pnet(&dp->net); } @@ -187,23 +185,23 @@ extern struct genl_family dp_vport_genl_family; void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key); void ovs_dp_detach_port(struct vport *); int ovs_dp_upcall(struct datapath *, struct sk_buff *, - const struct dp_upcall_info *); + const struct sw_flow_key *, const struct dp_upcall_info *); const char *ovs_dp_name(const struct datapath *dp); struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq, u8 cmd); int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb, - struct sw_flow_key *); + const struct sw_flow_actions *, struct sw_flow_key *); void ovs_dp_notify_wq(struct work_struct *work); int action_fifos_init(void); void action_fifos_exit(void); -#define OVS_NLERR(fmt, ...) \ +#define OVS_NLERR(logging_allowed, fmt, ...) \ do { \ - if (net_ratelimit()) \ - pr_info("netlink: " fmt, ##__VA_ARGS__); \ + if (logging_allowed && net_ratelimit()) \ + pr_info("netlink: " fmt "\n", ##__VA_ARGS__); \ } while (0) #endif /* datapath.h */ diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 2b78789ea7c5..70bef2ab7f2b 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -32,6 +32,7 @@ #include <linux/if_arp.h> #include <linux/ip.h> #include <linux/ipv6.h> +#include <linux/mpls.h> #include <linux/sctp.h> #include <linux/smp.h> #include <linux/tcp.h> @@ -42,6 +43,7 @@ #include <net/ip.h> #include <net/ip_tunnels.h> #include <net/ipv6.h> +#include <net/mpls.h> #include <net/ndisc.h> #include "datapath.h" @@ -64,7 +66,7 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies) #define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF)) void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags, - struct sk_buff *skb) + const struct sk_buff *skb) { struct flow_stats *stats; int node = numa_node_id(); @@ -480,6 +482,7 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) return -ENOMEM; skb_reset_network_header(skb); + skb_reset_mac_len(skb); __skb_push(skb, skb->data - skb_mac_header(skb)); /* Network layer. */ @@ -584,6 +587,33 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) memset(&key->ip, 0, sizeof(key->ip)); memset(&key->ipv4, 0, sizeof(key->ipv4)); } + } else if (eth_p_mpls(key->eth.type)) { + size_t stack_len = MPLS_HLEN; + + /* In the presence of an MPLS label stack the end of the L2 + * header and the beginning of the L3 header differ. + * + * Advance network_header to the beginning of the L3 + * header. mac_len corresponds to the end of the L2 header. 
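The while (1) loop that follows walks the MPLS stack one 4-byte label stack entry (LSE) at a time, recording the top entry in the flow key and stopping when the bottom-of-stack (S) bit is set. A rough user-space rendering of the same walk (a hypothetical helper, not the kernel code; MPLS_LS_S_MASK is 0x100 per the uapi header, tested here after byte-swapping rather than against htonl()):

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

#define MPLS_HLEN	4
#define MPLS_LS_S_MASK	0x00000100	/* bottom-of-stack bit */

/* Returns the stack length in bytes and stores the top LSE (the analogue
 * of key->mpls.top_lse), or 0 if the buffer ends before the S bit. */
static size_t mpls_stack_walk(const uint8_t *l3, size_t len, uint32_t *top_lse)
{
	size_t off = 0;

	while (off + MPLS_HLEN <= len) {
		uint32_t lse;

		memcpy(&lse, l3 + off, MPLS_HLEN);	/* avoid unaligned reads */
		lse = ntohl(lse);
		if (off == 0)
			*top_lse = lse;
		off += MPLS_HLEN;
		if (lse & MPLS_LS_S_MASK)
			return off;	/* bottom of stack reached */
	}
	return 0;	/* truncated stack */
}

int main(void)
{
	/* Two-entry stack: label 100 (S=0), then label 200 (S=1), TTL 255. */
	const uint8_t pkt[] = { 0x00, 0x06, 0x40, 0xff,
				0x00, 0x0c, 0x81, 0xff };
	uint32_t top;
	size_t n = mpls_stack_walk(pkt, sizeof(pkt), &top);

	return (n == 8 && (top >> 12) == 100) ? 0 : 1;
}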
+ */ + while (1) { + __be32 lse; + + error = check_header(skb, skb->mac_len + stack_len); + if (unlikely(error)) + return 0; + + memcpy(&lse, skb_network_header(skb), MPLS_HLEN); + + if (stack_len == MPLS_HLEN) + memcpy(&key->mpls.top_lse, &lse, MPLS_HLEN); + + skb_set_network_header(skb, skb->mac_len + stack_len); + if (lse & htonl(MPLS_LS_S_MASK)) + break; + + stack_len += MPLS_HLEN; + } } else if (key->eth.type == htons(ETH_P_IPV6)) { int nh_len; /* IPv6 Header + Extensions */ @@ -649,7 +679,7 @@ int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key) return key_extract(skb, key); } -int ovs_flow_key_extract(struct ovs_tunnel_info *tun_info, +int ovs_flow_key_extract(const struct ovs_tunnel_info *tun_info, struct sk_buff *skb, struct sw_flow_key *key) { /* Extract metadata from packet. */ @@ -682,12 +712,12 @@ int ovs_flow_key_extract(struct ovs_tunnel_info *tun_info, int ovs_flow_key_extract_userspace(const struct nlattr *attr, struct sk_buff *skb, - struct sw_flow_key *key) + struct sw_flow_key *key, bool log) { int err; /* Extract metadata from netlink attributes. */ - err = ovs_nla_get_flow_metadata(attr, key); + err = ovs_nla_get_flow_metadata(attr, key, log); if (err) return err; diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h index 71813318c8c7..a8b30f334388 100644 --- a/net/openvswitch/flow.h +++ b/net/openvswitch/flow.h @@ -37,8 +37,8 @@ struct sk_buff; /* Used to memset ovs_key_ipv4_tunnel padding. */ #define OVS_TUNNEL_KEY_SIZE \ - (offsetof(struct ovs_key_ipv4_tunnel, ipv4_ttl) + \ - FIELD_SIZEOF(struct ovs_key_ipv4_tunnel, ipv4_ttl)) + (offsetof(struct ovs_key_ipv4_tunnel, tp_dst) + \ + FIELD_SIZEOF(struct ovs_key_ipv4_tunnel, tp_dst)) struct ovs_key_ipv4_tunnel { __be64 tun_id; @@ -47,11 +47,13 @@ struct ovs_key_ipv4_tunnel { __be16 tun_flags; u8 ipv4_tos; u8 ipv4_ttl; + __be16 tp_src; + __be16 tp_dst; } __packed __aligned(4); /* Minimize padding. */ struct ovs_tunnel_info { struct ovs_key_ipv4_tunnel tunnel; - struct geneve_opt *options; + const struct geneve_opt *options; u8 options_len; }; @@ -64,27 +66,59 @@ struct ovs_tunnel_info { FIELD_SIZEOF(struct sw_flow_key, tun_opts) - \ opt_len)) -static inline void ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info, - const struct iphdr *iph, - __be64 tun_id, __be16 tun_flags, - struct geneve_opt *opts, - u8 opts_len) +static inline void __ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info, + __be32 saddr, __be32 daddr, + u8 tos, u8 ttl, + __be16 tp_src, + __be16 tp_dst, + __be64 tun_id, + __be16 tun_flags, + const struct geneve_opt *opts, + u8 opts_len) { tun_info->tunnel.tun_id = tun_id; - tun_info->tunnel.ipv4_src = iph->saddr; - tun_info->tunnel.ipv4_dst = iph->daddr; - tun_info->tunnel.ipv4_tos = iph->tos; - tun_info->tunnel.ipv4_ttl = iph->ttl; + tun_info->tunnel.ipv4_src = saddr; + tun_info->tunnel.ipv4_dst = daddr; + tun_info->tunnel.ipv4_tos = tos; + tun_info->tunnel.ipv4_ttl = ttl; tun_info->tunnel.tun_flags = tun_flags; - /* clear struct padding. */ - memset((unsigned char *)&tun_info->tunnel + OVS_TUNNEL_KEY_SIZE, 0, - sizeof(tun_info->tunnel) - OVS_TUNNEL_KEY_SIZE); + /* For tunnel types on top of IPsec, the tp_src and tp_dst of + * the upper tunnel are used. + * E.g. for GRE over IPsec, the tp_src and tp_dst are zero. + */ + tun_info->tunnel.tp_src = tp_src; + tun_info->tunnel.tp_dst = tp_dst; + + /* Clear struct padding.
*/ + if (sizeof(tun_info->tunnel) != OVS_TUNNEL_KEY_SIZE) + memset((unsigned char *)&tun_info->tunnel + OVS_TUNNEL_KEY_SIZE, + 0, sizeof(tun_info->tunnel) - OVS_TUNNEL_KEY_SIZE); tun_info->options = opts; tun_info->options_len = opts_len; } +static inline void ovs_flow_tun_info_init(struct ovs_tunnel_info *tun_info, + const struct iphdr *iph, + __be16 tp_src, + __be16 tp_dst, + __be64 tun_id, + __be16 tun_flags, + const struct geneve_opt *opts, + u8 opts_len) +{ + __ovs_flow_tun_info_init(tun_info, iph->saddr, iph->daddr, + iph->tos, iph->ttl, + tp_src, tp_dst, + tun_id, tun_flags, + opts, opts_len); +} + +#define OVS_SW_FLOW_KEY_METADATA_SIZE \ + (offsetof(struct sw_flow_key, recirc_id) + \ + FIELD_SIZEOF(struct sw_flow_key, recirc_id)) + struct sw_flow_key { u8 tun_opts[255]; u8 tun_opts_len; @@ -102,12 +136,17 @@ struct sw_flow_key { __be16 tci; /* 0 if no VLAN, VLAN_TAG_PRESENT set otherwise. */ __be16 type; /* Ethernet frame type. */ } eth; - struct { - u8 proto; /* IP protocol or lower 8 bits of ARP opcode. */ - u8 tos; /* IP ToS. */ - u8 ttl; /* IP TTL/hop limit. */ - u8 frag; /* One of OVS_FRAG_TYPE_*. */ - } ip; + union { + struct { + __be32 top_lse; /* top label stack entry */ + } mpls; + struct { + u8 proto; /* IP protocol or lower 8 bits of ARP opcode. */ + u8 tos; /* IP ToS. */ + u8 ttl; /* IP TTL/hop limit. */ + u8 frag; /* One of OVS_FRAG_TYPE_*. */ + } ip; + }; struct { __be16 src; /* TCP/UDP/SCTP source port. */ __be16 dst; /* TCP/UDP/SCTP destination port. */ @@ -205,18 +244,19 @@ struct arp_eth_header { } __packed; void ovs_flow_stats_update(struct sw_flow *, __be16 tcp_flags, - struct sk_buff *); + const struct sk_buff *); void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *, unsigned long *used, __be16 *tcp_flags); void ovs_flow_stats_clear(struct sw_flow *); u64 ovs_flow_used_time(unsigned long flow_jiffies); int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key); -int ovs_flow_key_extract(struct ovs_tunnel_info *tun_info, struct sk_buff *skb, +int ovs_flow_key_extract(const struct ovs_tunnel_info *tun_info, + struct sk_buff *skb, struct sw_flow_key *key); /* Extract key from packet coming from userspace. 
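Note what the guarded memset just above achieves: once tp_src and tp_dst are appended, OVS_TUNNEL_KEY_SIZE covers the entire packed structure, so there is no tail padding left to clear and the constant-false size comparison lets the compiler drop the memset altogether. A stand-alone sketch of that invariant (hypothetical mirror of the struct; GCC/Clang packed/aligned attributes assumed):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct tun_key {
	uint64_t tun_id;
	uint32_t ipv4_src;
	uint32_t ipv4_dst;
	uint16_t tun_flags;
	uint8_t  ipv4_tos;
	uint8_t  ipv4_ttl;
	uint16_t tp_src;
	uint16_t tp_dst;
} __attribute__((packed, aligned(4)));

/* Mirrors OVS_TUNNEL_KEY_SIZE: bytes up to and including tp_dst. */
#define TUN_KEY_SIZE \
	(offsetof(struct tun_key, tp_dst) + sizeof(((struct tun_key *)0)->tp_dst))

int main(void)
{
	printf("sizeof=%zu covered=%zu tail=%zu\n",
	       sizeof(struct tun_key), (size_t)TUN_KEY_SIZE,
	       sizeof(struct tun_key) - TUN_KEY_SIZE);
	return 0;
}

On a typical toolchain this prints sizeof=24 covered=24 tail=0, i.e. the memset body is dead code, which is exactly why the kernel now wraps it in the size check.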
*/ int ovs_flow_key_extract_userspace(const struct nlattr *attr, struct sk_buff *skb, - struct sw_flow_key *key); + struct sw_flow_key *key, bool log); #endif /* flow.h */ diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 939bcb32100f..c8fccdded865 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -46,24 +46,22 @@ #include <net/ip.h> #include <net/ipv6.h> #include <net/ndisc.h> +#include <net/mpls.h> #include "flow_netlink.h" -static void update_range__(struct sw_flow_match *match, - size_t offset, size_t size, bool is_mask) +static void update_range(struct sw_flow_match *match, + size_t offset, size_t size, bool is_mask) { - struct sw_flow_key_range *range = NULL; + struct sw_flow_key_range *range; size_t start = rounddown(offset, sizeof(long)); size_t end = roundup(offset + size, sizeof(long)); if (!is_mask) range = &match->range; - else if (match->mask) + else range = &match->mask->range; - if (!range) - return; - if (range->start == range->end) { range->start = start; range->end = end; @@ -79,22 +77,20 @@ static void update_range__(struct sw_flow_match *match, #define SW_FLOW_KEY_PUT(match, field, value, is_mask) \ do { \ - update_range__(match, offsetof(struct sw_flow_key, field), \ - sizeof((match)->key->field), is_mask); \ - if (is_mask) { \ - if ((match)->mask) \ - (match)->mask->key.field = value; \ - } else { \ + update_range(match, offsetof(struct sw_flow_key, field), \ + sizeof((match)->key->field), is_mask); \ + if (is_mask) \ + (match)->mask->key.field = value; \ + else \ (match)->key->field = value; \ - } \ } while (0) #define SW_FLOW_KEY_MEMCPY_OFFSET(match, offset, value_p, len, is_mask) \ do { \ - update_range__(match, offset, len, is_mask); \ + update_range(match, offset, len, is_mask); \ if (is_mask) \ memcpy((u8 *)&(match)->mask->key + offset, value_p, \ - len); \ + len); \ else \ memcpy((u8 *)(match)->key + offset, value_p, len); \ } while (0) @@ -103,22 +99,20 @@ static void update_range__(struct sw_flow_match *match, SW_FLOW_KEY_MEMCPY_OFFSET(match, offsetof(struct sw_flow_key, field), \ value_p, len, is_mask) -#define SW_FLOW_KEY_MEMSET_FIELD(match, field, value, is_mask) \ - do { \ - update_range__(match, offsetof(struct sw_flow_key, field), \ - sizeof((match)->key->field), is_mask); \ - if (is_mask) { \ - if ((match)->mask) \ - memset((u8 *)&(match)->mask->key.field, value,\ - sizeof((match)->mask->key.field)); \ - } else { \ +#define SW_FLOW_KEY_MEMSET_FIELD(match, field, value, is_mask) \ + do { \ + update_range(match, offsetof(struct sw_flow_key, field), \ + sizeof((match)->key->field), is_mask); \ + if (is_mask) \ + memset((u8 *)&(match)->mask->key.field, value, \ + sizeof((match)->mask->key.field)); \ + else \ memset((u8 *)&(match)->key->field, value, \ sizeof((match)->key->field)); \ - } \ } while (0) static bool match_validate(const struct sw_flow_match *match, - u64 key_attrs, u64 mask_attrs) + u64 key_attrs, u64 mask_attrs, bool log) { u64 key_expected = 1 << OVS_KEY_ATTR_ETHERNET; u64 mask_allowed = key_attrs; /* At most allow all key attributes */ @@ -134,7 +128,8 @@ static bool match_validate(const struct sw_flow_match *match, | (1 << OVS_KEY_ATTR_ICMP) | (1 << OVS_KEY_ATTR_ICMPV6) | (1 << OVS_KEY_ATTR_ARP) - | (1 << OVS_KEY_ATTR_ND)); + | (1 << OVS_KEY_ATTR_ND) + | (1 << OVS_KEY_ATTR_MPLS)); /* Always allowed mask fields. 
*/ mask_allowed |= ((1 << OVS_KEY_ATTR_TUNNEL) @@ -149,6 +144,12 @@ static bool match_validate(const struct sw_flow_match *match, mask_allowed |= 1 << OVS_KEY_ATTR_ARP; } + if (eth_p_mpls(match->key->eth.type)) { + key_expected |= 1 << OVS_KEY_ATTR_MPLS; + if (match->mask && (match->mask->key.eth.type == htons(0xffff))) + mask_allowed |= 1 << OVS_KEY_ATTR_MPLS; + } + if (match->key->eth.type == htons(ETH_P_IP)) { key_expected |= 1 << OVS_KEY_ATTR_IPV4; if (match->mask && (match->mask->key.eth.type == htons(0xffff))) @@ -229,21 +230,65 @@ static bool match_validate(const struct sw_flow_match *match, if ((key_attrs & key_expected) != key_expected) { /* Key attributes check failed. */ - OVS_NLERR("Missing expected key attributes (key_attrs=%llx, expected=%llx).\n", - (unsigned long long)key_attrs, (unsigned long long)key_expected); + OVS_NLERR(log, "Missing key (keys=%llx, expected=%llx)", + (unsigned long long)key_attrs, + (unsigned long long)key_expected); return false; } if ((mask_attrs & mask_allowed) != mask_attrs) { /* Mask attributes check failed. */ - OVS_NLERR("Contain more than allowed mask fields (mask_attrs=%llx, mask_allowed=%llx).\n", - (unsigned long long)mask_attrs, (unsigned long long)mask_allowed); + OVS_NLERR(log, "Unexpected mask (mask=%llx, allowed=%llx)", + (unsigned long long)mask_attrs, + (unsigned long long)mask_allowed); return false; } return true; } +size_t ovs_tun_key_attr_size(void) +{ + /* Whenever adding new OVS_TUNNEL_KEY_ FIELDS, we should consider + * updating this function. + */ + return nla_total_size(8) /* OVS_TUNNEL_KEY_ATTR_ID */ + + nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */ + + nla_total_size(4) /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */ + + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TOS */ + + nla_total_size(1) /* OVS_TUNNEL_KEY_ATTR_TTL */ + + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */ + + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_CSUM */ + + nla_total_size(0) /* OVS_TUNNEL_KEY_ATTR_OAM */ + + nla_total_size(256) /* OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS */ + + nla_total_size(2) /* OVS_TUNNEL_KEY_ATTR_TP_SRC */ + + nla_total_size(2); /* OVS_TUNNEL_KEY_ATTR_TP_DST */ +} + +size_t ovs_key_attr_size(void) +{ + /* Whenever adding new OVS_KEY_ FIELDS, we should consider + * updating this function. + */ + BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 22); + + return nla_total_size(4) /* OVS_KEY_ATTR_PRIORITY */ + + nla_total_size(0) /* OVS_KEY_ATTR_TUNNEL */ + + ovs_tun_key_attr_size() + + nla_total_size(4) /* OVS_KEY_ATTR_IN_PORT */ + + nla_total_size(4) /* OVS_KEY_ATTR_SKB_MARK */ + + nla_total_size(4) /* OVS_KEY_ATTR_DP_HASH */ + + nla_total_size(4) /* OVS_KEY_ATTR_RECIRC_ID */ + + nla_total_size(12) /* OVS_KEY_ATTR_ETHERNET */ + + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */ + + nla_total_size(4) /* OVS_KEY_ATTR_VLAN */ + + nla_total_size(0) /* OVS_KEY_ATTR_ENCAP */ + + nla_total_size(2) /* OVS_KEY_ATTR_ETHERTYPE */ + + nla_total_size(40) /* OVS_KEY_ATTR_IPV6 */ + + nla_total_size(2) /* OVS_KEY_ATTR_ICMPV6 */ + + nla_total_size(28); /* OVS_KEY_ATTR_ND */ +} + /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute. 
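The BUILD_BUG_ON(OVS_KEY_ATTR_TUNNEL_INFO != 22) inside ovs_key_attr_size() above is a compile-time tripwire: adding a new key attribute shifts the enum, breaks the build at this line, and forces the author to revisit the size estimate. A minimal stand-alone version of the idiom, using the classic negative-array-size trick (hypothetical enum for illustration):

/* Evaluates to a negative array size, i.e. a compile error, when cond
 * is true; compiles to nothing otherwise. */
#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

enum key_attr { KEY_ATTR_A, KEY_ATTR_B, KEY_ATTR_LAST };

static int key_attr_size(void)
{
	/* Fails to compile as soon as someone inserts a new enum value. */
	BUILD_BUG_ON(KEY_ATTR_LAST != 2);
	return 4 + 8;	/* placeholder sum of per-attribute sizes */
}

int main(void)
{
	return key_attr_size() ? 0 : 1;
}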
*/ static const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { [OVS_KEY_ATTR_ENCAP] = -1, @@ -266,6 +311,7 @@ static const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = { [OVS_KEY_ATTR_RECIRC_ID] = sizeof(u32), [OVS_KEY_ATTR_DP_HASH] = sizeof(u32), [OVS_KEY_ATTR_TUNNEL] = -1, + [OVS_KEY_ATTR_MPLS] = sizeof(struct ovs_key_mpls), }; static bool is_all_zero(const u8 *fp, size_t size) @@ -284,7 +330,7 @@ static bool is_all_zero(const u8 *fp, size_t size) static int __parse_flow_nlattrs(const struct nlattr *attr, const struct nlattr *a[], - u64 *attrsp, bool nz) + u64 *attrsp, bool log, bool nz) { const struct nlattr *nla; u64 attrs; @@ -296,21 +342,20 @@ static int __parse_flow_nlattrs(const struct nlattr *attr, int expected_len; if (type > OVS_KEY_ATTR_MAX) { - OVS_NLERR("Unknown key attribute (type=%d, max=%d).\n", + OVS_NLERR(log, "Key type %d is out of range max %d", type, OVS_KEY_ATTR_MAX); return -EINVAL; } if (attrs & (1 << type)) { - OVS_NLERR("Duplicate key attribute (type %d).\n", type); + OVS_NLERR(log, "Duplicate key (type %d).", type); return -EINVAL; } expected_len = ovs_key_lens[type]; if (nla_len(nla) != expected_len && expected_len != -1) { - OVS_NLERR("Key attribute has unexpected length (type=%d" - ", length=%d, expected=%d).\n", type, - nla_len(nla), expected_len); + OVS_NLERR(log, "Key %d has unexpected len %d expected %d", + type, nla_len(nla), expected_len); return -EINVAL; } @@ -320,7 +365,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr, } } if (rem) { - OVS_NLERR("Message has %d unknown bytes.\n", rem); + OVS_NLERR(log, "Message has %d unknown bytes.", rem); return -EINVAL; } @@ -329,28 +374,84 @@ static int __parse_flow_nlattrs(const struct nlattr *attr, } static int parse_flow_mask_nlattrs(const struct nlattr *attr, - const struct nlattr *a[], u64 *attrsp) + const struct nlattr *a[], u64 *attrsp, + bool log) { - return __parse_flow_nlattrs(attr, a, attrsp, true); + return __parse_flow_nlattrs(attr, a, attrsp, log, true); } static int parse_flow_nlattrs(const struct nlattr *attr, - const struct nlattr *a[], u64 *attrsp) + const struct nlattr *a[], u64 *attrsp, + bool log) { - return __parse_flow_nlattrs(attr, a, attrsp, false); + return __parse_flow_nlattrs(attr, a, attrsp, log, false); +} + +static int genev_tun_opt_from_nlattr(const struct nlattr *a, + struct sw_flow_match *match, bool is_mask, + bool log) +{ + unsigned long opt_key_offset; + + if (nla_len(a) > sizeof(match->key->tun_opts)) { + OVS_NLERR(log, "Geneve option length err (len %d, max %zu).", + nla_len(a), sizeof(match->key->tun_opts)); + return -EINVAL; + } + + if (nla_len(a) % 4 != 0) { + OVS_NLERR(log, "Geneve opt len %d is not a multiple of 4.", + nla_len(a)); + return -EINVAL; + } + + /* We need to record the length of the options passed + * down, otherwise packets with the same format but + * additional options will be silently matched. + */ + if (!is_mask) { + SW_FLOW_KEY_PUT(match, tun_opts_len, nla_len(a), + false); + } else { + /* This is somewhat unusual because it looks at + * both the key and mask while parsing the + * attributes (and by extension assumes the key + * is parsed first). Normally, we would verify + * that each is the correct length and that the + * attributes line up in the validate function. + * However, that is difficult because this is + * variable length and we won't have the + * information later. 
+ */ + if (match->key->tun_opts_len != nla_len(a)) { + OVS_NLERR(log, "Geneve option len %d != mask len %d", + match->key->tun_opts_len, nla_len(a)); + return -EINVAL; + } + + SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, true); + } + + opt_key_offset = (unsigned long)GENEVE_OPTS((struct sw_flow_key *)0, + nla_len(a)); + SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, nla_data(a), + nla_len(a), is_mask); + return 0; } static int ipv4_tun_from_nlattr(const struct nlattr *attr, - struct sw_flow_match *match, bool is_mask) + struct sw_flow_match *match, bool is_mask, + bool log) { struct nlattr *a; int rem; bool ttl = false; __be16 tun_flags = 0; - unsigned long opt_key_offset; nla_for_each_nested(a, attr, rem) { int type = nla_type(a); + int err; + static const u32 ovs_tunnel_key_lens[OVS_TUNNEL_KEY_ATTR_MAX + 1] = { [OVS_TUNNEL_KEY_ATTR_ID] = sizeof(u64), [OVS_TUNNEL_KEY_ATTR_IPV4_SRC] = sizeof(u32), @@ -359,20 +460,21 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr, [OVS_TUNNEL_KEY_ATTR_TTL] = 1, [OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT] = 0, [OVS_TUNNEL_KEY_ATTR_CSUM] = 0, + [OVS_TUNNEL_KEY_ATTR_TP_SRC] = sizeof(u16), + [OVS_TUNNEL_KEY_ATTR_TP_DST] = sizeof(u16), [OVS_TUNNEL_KEY_ATTR_OAM] = 0, [OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS] = -1, }; if (type > OVS_TUNNEL_KEY_ATTR_MAX) { - OVS_NLERR("Unknown IPv4 tunnel attribute (type=%d, max=%d).\n", - type, OVS_TUNNEL_KEY_ATTR_MAX); + OVS_NLERR(log, "Tunnel attr %d out of range max %d", + type, OVS_TUNNEL_KEY_ATTR_MAX); return -EINVAL; } if (ovs_tunnel_key_lens[type] != nla_len(a) && ovs_tunnel_key_lens[type] != -1) { - OVS_NLERR("IPv4 tunnel attribute type has unexpected " - " length (type=%d, length=%d, expected=%d).\n", + OVS_NLERR(log, "Tunnel attr %d has unexpected len %d expected %d", type, nla_len(a), ovs_tunnel_key_lens[type]); return -EINVAL; } @@ -406,62 +508,26 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr, case OVS_TUNNEL_KEY_ATTR_CSUM: tun_flags |= TUNNEL_CSUM; break; + case OVS_TUNNEL_KEY_ATTR_TP_SRC: + SW_FLOW_KEY_PUT(match, tun_key.tp_src, + nla_get_be16(a), is_mask); + break; + case OVS_TUNNEL_KEY_ATTR_TP_DST: + SW_FLOW_KEY_PUT(match, tun_key.tp_dst, + nla_get_be16(a), is_mask); + break; case OVS_TUNNEL_KEY_ATTR_OAM: tun_flags |= TUNNEL_OAM; break; case OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS: - tun_flags |= TUNNEL_OPTIONS_PRESENT; - if (nla_len(a) > sizeof(match->key->tun_opts)) { - OVS_NLERR("Geneve option length exceeds maximum size (len %d, max %zu).\n", - nla_len(a), - sizeof(match->key->tun_opts)); - return -EINVAL; - } - - if (nla_len(a) % 4 != 0) { - OVS_NLERR("Geneve option length is not a multiple of 4 (len %d).\n", - nla_len(a)); - return -EINVAL; - } - - /* We need to record the length of the options passed - * down, otherwise packets with the same format but - * additional options will be silently matched. - */ - if (!is_mask) { - SW_FLOW_KEY_PUT(match, tun_opts_len, nla_len(a), - false); - } else { - /* This is somewhat unusual because it looks at - * both the key and mask while parsing the - * attributes (and by extension assumes the key - * is parsed first). Normally, we would verify - * that each is the correct length and that the - * attributes line up in the validate function. - * However, that is difficult because this is - * variable length and we won't have the - * information later. 
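The opt_key_offset computation above leans on an offsetof-style idiom: GENEVE_OPTS((struct sw_flow_key *)0, len) evaluates the member address against a NULL base, yielding the byte offset at which a len-byte option block starts. Because options are stored right-aligned at the end of tun_opts[], that offset shrinks as len grows. A user-space sketch (hypothetical key struct; NULL-base pointer arithmetic is the kernel's idiom and not strictly portable C):

#include <stdio.h>

struct flow_key {
	unsigned char tun_opts[255];
	unsigned char tun_opts_len;
};

/* Options live right-aligned at the end of tun_opts[], like GENEVE_OPTS(). */
#define FLOW_KEY_OPTS(key, len) \
	((void *)((key)->tun_opts + sizeof((key)->tun_opts) - (len)))

int main(void)
{
	unsigned long off8 = (unsigned long)FLOW_KEY_OPTS((struct flow_key *)0, 8);
	unsigned long off16 = (unsigned long)FLOW_KEY_OPTS((struct flow_key *)0, 16);

	printf("8-byte opts at offset %lu, 16-byte opts at offset %lu\n",
	       off8, off16);	/* prints 247 and 239 */
	return 0;
}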
- */ - if (match->key->tun_opts_len != nla_len(a)) { - OVS_NLERR("Geneve option key length (%d) is different from mask length (%d).", - match->key->tun_opts_len, - nla_len(a)); - return -EINVAL; - } - - SW_FLOW_KEY_PUT(match, tun_opts_len, 0xff, - true); - } + err = genev_tun_opt_from_nlattr(a, match, is_mask, log); + if (err) + return err; - opt_key_offset = (unsigned long)GENEVE_OPTS( - (struct sw_flow_key *)0, - nla_len(a)); - SW_FLOW_KEY_MEMCPY_OFFSET(match, opt_key_offset, - nla_data(a), nla_len(a), - is_mask); + tun_flags |= TUNNEL_OPTIONS_PRESENT; break; default: - OVS_NLERR("Unknown IPv4 tunnel attribute (%d).\n", + OVS_NLERR(log, "Unknown IPv4 tunnel attribute %d", type); return -EINVAL; } @@ -470,18 +536,19 @@ static int ipv4_tun_from_nlattr(const struct nlattr *attr, SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask); if (rem > 0) { - OVS_NLERR("IPv4 tunnel attribute has %d unknown bytes.\n", rem); + OVS_NLERR(log, "IPv4 tunnel attribute has %d unknown bytes.", + rem); return -EINVAL; } if (!is_mask) { if (!match->key->tun_key.ipv4_dst) { - OVS_NLERR("IPv4 tunnel destination address is zero.\n"); + OVS_NLERR(log, "IPv4 tunnel dst address is zero"); return -EINVAL; } if (!ttl) { - OVS_NLERR("IPv4 tunnel TTL not specified.\n"); + OVS_NLERR(log, "IPv4 tunnel TTL not specified."); return -EINVAL; } } @@ -514,6 +581,12 @@ static int __ipv4_tun_to_nlattr(struct sk_buff *skb, if ((output->tun_flags & TUNNEL_CSUM) && nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM)) return -EMSGSIZE; + if (output->tp_src && + nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_SRC, output->tp_src)) + return -EMSGSIZE; + if (output->tp_dst && + nla_put_be16(skb, OVS_TUNNEL_KEY_ATTR_TP_DST, output->tp_dst)) + return -EMSGSIZE; if ((output->tun_flags & TUNNEL_OAM) && nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_OAM)) return -EMSGSIZE; @@ -525,7 +598,6 @@ static int __ipv4_tun_to_nlattr(struct sk_buff *skb, return 0; } - static int ipv4_tun_to_nlattr(struct sk_buff *skb, const struct ovs_key_ipv4_tunnel *output, const struct geneve_opt *tun_opts, @@ -546,8 +618,17 @@ static int ipv4_tun_to_nlattr(struct sk_buff *skb, return 0; } +int ovs_nla_put_egress_tunnel_key(struct sk_buff *skb, + const struct ovs_tunnel_info *egress_tun_info) +{ + return __ipv4_tun_to_nlattr(skb, &egress_tun_info->tunnel, + egress_tun_info->options, + egress_tun_info->options_len); +} + static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs, - const struct nlattr **a, bool is_mask) + const struct nlattr **a, bool is_mask, + bool log) { if (*attrs & (1 << OVS_KEY_ATTR_DP_HASH)) { u32 hash_val = nla_get_u32(a[OVS_KEY_ATTR_DP_HASH]); @@ -572,10 +653,13 @@ static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs, if (*attrs & (1 << OVS_KEY_ATTR_IN_PORT)) { u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]); - if (is_mask) + if (is_mask) { in_port = 0xffffffff; /* Always exact match in_port. 
*/ - else if (in_port >= DP_MAX_PORTS) + } else if (in_port >= DP_MAX_PORTS) { + OVS_NLERR(log, "Port %d exceeds max allowable %d", + in_port, DP_MAX_PORTS); return -EINVAL; + } SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask); *attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT); @@ -591,7 +675,7 @@ static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs, } if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) { if (ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match, - is_mask)) + is_mask, log)) return -EINVAL; *attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL); } @@ -599,12 +683,12 @@ static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs, } static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, - const struct nlattr **a, bool is_mask) + const struct nlattr **a, bool is_mask, + bool log) { int err; - u64 orig_attrs = attrs; - err = metadata_from_nlattrs(match, &attrs, a, is_mask); + err = metadata_from_nlattrs(match, &attrs, a, is_mask, log); if (err) return err; @@ -625,17 +709,16 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]); if (!(tci & htons(VLAN_TAG_PRESENT))) { if (is_mask) - OVS_NLERR("VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit.\n"); + OVS_NLERR(log, "VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit."); else - OVS_NLERR("VLAN TCI does not have VLAN_TAG_PRESENT bit set.\n"); + OVS_NLERR(log, "VLAN TCI does not have VLAN_TAG_PRESENT bit set."); return -EINVAL; } SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask); attrs &= ~(1 << OVS_KEY_ATTR_VLAN); - } else if (!is_mask) - SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true); + } if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) { __be16 eth_type; @@ -645,8 +728,8 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, /* Always exact match EtherType. 
*/ eth_type = htons(0xffff); } else if (ntohs(eth_type) < ETH_P_802_3_MIN) { - OVS_NLERR("EtherType is less than minimum (type=%x, min=%x).\n", - ntohs(eth_type), ETH_P_802_3_MIN); + OVS_NLERR(log, "EtherType %x is less than min %x", + ntohs(eth_type), ETH_P_802_3_MIN); return -EINVAL; } @@ -661,8 +744,8 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]); if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) { - OVS_NLERR("Unknown IPv4 fragment type (value=%d, max=%d).\n", - ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX); + OVS_NLERR(log, "IPv4 frag type %d is out of range max %d", + ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX); return -EINVAL; } SW_FLOW_KEY_PUT(match, ip.proto, @@ -685,8 +768,8 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]); if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) { - OVS_NLERR("Unknown IPv6 fragment type (value=%d, max=%d).\n", - ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX); + OVS_NLERR(log, "IPv6 frag type %d is out of range max %d", + ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX); return -EINVAL; } SW_FLOW_KEY_PUT(match, ipv6.label, @@ -716,7 +799,7 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, arp_key = nla_data(a[OVS_KEY_ATTR_ARP]); if (!is_mask && (arp_key->arp_op & htons(0xff00))) { - OVS_NLERR("Unknown ARP opcode (opcode=%d).\n", + OVS_NLERR(log, "Unknown ARP opcode (opcode=%d).", arp_key->arp_op); return -EINVAL; } @@ -735,6 +818,16 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, attrs &= ~(1 << OVS_KEY_ATTR_ARP); } + if (attrs & (1 << OVS_KEY_ATTR_MPLS)) { + const struct ovs_key_mpls *mpls_key; + + mpls_key = nla_data(a[OVS_KEY_ATTR_MPLS]); + SW_FLOW_KEY_PUT(match, mpls.top_lse, + mpls_key->mpls_lse, is_mask); + + attrs &= ~(1 << OVS_KEY_ATTR_MPLS); + } + if (attrs & (1 << OVS_KEY_ATTR_TCP)) { const struct ovs_key_tcp *tcp_key; @@ -745,15 +838,9 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, } if (attrs & (1 << OVS_KEY_ATTR_TCP_FLAGS)) { - if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) { - SW_FLOW_KEY_PUT(match, tp.flags, - nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]), - is_mask); - } else { - SW_FLOW_KEY_PUT(match, tp.flags, - nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]), - is_mask); - } + SW_FLOW_KEY_PUT(match, tp.flags, + nla_get_be16(a[OVS_KEY_ATTR_TCP_FLAGS]), + is_mask); attrs &= ~(1 << OVS_KEY_ATTR_TCP_FLAGS); } @@ -812,8 +899,11 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs, attrs &= ~(1 << OVS_KEY_ATTR_ND); } - if (attrs != 0) + if (attrs != 0) { + OVS_NLERR(log, "Unknown key attributes %llx", + (unsigned long long)attrs); return -EINVAL; + } return 0; } @@ -851,10 +941,14 @@ static void mask_set_nlattr(struct nlattr *attr, u8 val) * of this flow. * @mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink * attribute specifies the mask field of the wildcarded flow. + * @log: Boolean to allow kernel error logging. Normally true, but when + * probing for feature compatibility this should be passed in as false to + * suppress unnecessary error logging. 
*/ int ovs_nla_get_match(struct sw_flow_match *match, - const struct nlattr *key, - const struct nlattr *mask) + const struct nlattr *nla_key, + const struct nlattr *nla_mask, + bool log) { const struct nlattr *a[OVS_KEY_ATTR_MAX + 1]; const struct nlattr *encap; @@ -864,7 +958,7 @@ int ovs_nla_get_match(struct sw_flow_match *match, bool encap_valid = false; int err; - err = parse_flow_nlattrs(key, a, &key_attrs); + err = parse_flow_nlattrs(nla_key, a, &key_attrs, log); if (err) return err; @@ -875,7 +969,7 @@ int ovs_nla_get_match(struct sw_flow_match *match, if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) && (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) { - OVS_NLERR("Invalid Vlan frame.\n"); + OVS_NLERR(log, "Invalid Vlan frame."); return -EINVAL; } @@ -886,61 +980,68 @@ int ovs_nla_get_match(struct sw_flow_match *match, encap_valid = true; if (tci & htons(VLAN_TAG_PRESENT)) { - err = parse_flow_nlattrs(encap, a, &key_attrs); + err = parse_flow_nlattrs(encap, a, &key_attrs, log); if (err) return err; } else if (!tci) { /* Corner case for truncated 802.1Q header. */ if (nla_len(encap)) { - OVS_NLERR("Truncated 802.1Q header has non-zero encap attribute.\n"); + OVS_NLERR(log, "Truncated 802.1Q header has non-zero encap attribute."); return -EINVAL; } } else { - OVS_NLERR("Encap attribute is set for a non-VLAN frame.\n"); + OVS_NLERR(log, "Encap attr is set for non-VLAN frame"); return -EINVAL; } } - err = ovs_key_from_nlattrs(match, key_attrs, a, false); + err = ovs_key_from_nlattrs(match, key_attrs, a, false, log); if (err) return err; - if (match->mask && !mask) { - /* Create an exact match mask. We need to set to 0xff all the - * 'match->mask' fields that have been touched in 'match->key'. - * We cannot simply memset 'match->mask', because padding bytes - * and fields not specified in 'match->key' should be left to 0. - * Instead, we use a stream of netlink attributes, copied from - * 'key' and set to 0xff: ovs_key_from_nlattrs() will take care - * of filling 'match->mask' appropriately. - */ - newmask = kmemdup(key, nla_total_size(nla_len(key)), - GFP_KERNEL); - if (!newmask) - return -ENOMEM; + if (match->mask) { + if (!nla_mask) { + /* Create an exact match mask. We need to set to 0xff + * all the 'match->mask' fields that have been touched + * in 'match->key'. We cannot simply memset + * 'match->mask', because padding bytes and fields not + * specified in 'match->key' should be left to 0. + * Instead, we use a stream of netlink attributes, + * copied from 'key' and set to 0xff. + * ovs_key_from_nlattrs() will take care of filling + * 'match->mask' appropriately. + */ + newmask = kmemdup(nla_key, + nla_total_size(nla_len(nla_key)), + GFP_KERNEL); + if (!newmask) + return -ENOMEM; - mask_set_nlattr(newmask, 0xff); + mask_set_nlattr(newmask, 0xff); - /* The userspace does not send tunnel attributes that are 0, - * but we should not wildcard them nonetheless. - */ - if (match->key->tun_key.ipv4_dst) - SW_FLOW_KEY_MEMSET_FIELD(match, tun_key, 0xff, true); + /* The userspace does not send tunnel attributes that + * are 0, but we should not wildcard them nonetheless. + */ + if (match->key->tun_key.ipv4_dst) + SW_FLOW_KEY_MEMSET_FIELD(match, tun_key, + 0xff, true); - mask = newmask; - } + nla_mask = newmask; + } - if (mask) { - err = parse_flow_mask_nlattrs(mask, a, &mask_attrs); + err = parse_flow_mask_nlattrs(nla_mask, a, &mask_attrs, log); if (err) goto free_newmask; + /* Always match on tci. 
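To make the "stream of netlink attributes set to 0xff" trick described above concrete, here is a simplified flat TLV walk: the headers are preserved so the stream still parses, and only the value bytes are saturated. The format here is hypothetical (u16 total length, u16 type, 4-byte alignment), and the real mask_set_nlattr() additionally recurses into nested attributes such as OVS_KEY_ATTR_ENCAP:

#include <stdint.h>
#include <string.h>

#define TLV_ALIGN(n)	(((n) + 3u) & ~3u)

struct tlv {			/* models struct nlattr: total len + type */
	uint16_t len;
	uint16_t type;
};

/* Overwrite every attribute's value bytes with 'val' (0xff for an exact
 * match), leaving the headers intact so the stream still parses. */
static void mask_set_stream(uint8_t *stream, size_t total, uint8_t val)
{
	size_t off = 0;

	while (off + sizeof(struct tlv) <= total) {
		struct tlv hdr;

		memcpy(&hdr, stream + off, sizeof(hdr));
		if (hdr.len < sizeof(hdr) || off + hdr.len > total)
			return;	/* malformed attribute, stop */
		memset(stream + off + sizeof(hdr), val, hdr.len - sizeof(hdr));
		off += TLV_ALIGN(hdr.len);
	}
}

int main(void)
{
	/* One type-1 attribute: 4-byte header + 4-byte payload
	 * (little-endian host assumed for the literal header bytes). */
	uint8_t buf[8] = { 8, 0, 1, 0, 0x12, 0x34, 0x56, 0x78 };

	mask_set_stream(buf, sizeof(buf), 0xff);
	return buf[4] == 0xff ? 0 : 1;
}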
*/ + SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true); + if (mask_attrs & 1 << OVS_KEY_ATTR_ENCAP) { __be16 eth_type = 0; __be16 tci = 0; if (!encap_valid) { - OVS_NLERR("Encap mask attribute is set for non-VLAN frame.\n"); + OVS_NLERR(log, "Encap mask attribute is set for non-VLAN frame."); err = -EINVAL; goto free_newmask; } @@ -952,12 +1053,13 @@ int ovs_nla_get_match(struct sw_flow_match *match, if (eth_type == htons(0xffff)) { mask_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE); encap = a[OVS_KEY_ATTR_ENCAP]; - err = parse_flow_mask_nlattrs(encap, a, &mask_attrs); + err = parse_flow_mask_nlattrs(encap, a, + &mask_attrs, log); if (err) goto free_newmask; } else { - OVS_NLERR("VLAN frames must have an exact match on the TPID (mask=%x).\n", - ntohs(eth_type)); + OVS_NLERR(log, "VLAN frames must have an exact match on the TPID (mask=%x).", + ntohs(eth_type)); err = -EINVAL; goto free_newmask; } @@ -966,18 +1068,19 @@ int ovs_nla_get_match(struct sw_flow_match *match, tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]); if (!(tci & htons(VLAN_TAG_PRESENT))) { - OVS_NLERR("VLAN tag present bit must have an exact match (tci_mask=%x).\n", ntohs(tci)); + OVS_NLERR(log, "VLAN tag present bit must have an exact match (tci_mask=%x).", + ntohs(tci)); err = -EINVAL; goto free_newmask; } } - err = ovs_key_from_nlattrs(match, mask_attrs, a, true); + err = ovs_key_from_nlattrs(match, mask_attrs, a, true, log); if (err) goto free_newmask; } - if (!match_validate(match, key_attrs, mask_attrs)) + if (!match_validate(match, key_attrs, mask_attrs, log)) err = -EINVAL; free_newmask: @@ -990,6 +1093,9 @@ free_newmask: * @key: Receives extracted in_port, priority, tun_key and skb_mark. * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute * sequence. + * @log: Boolean to allow kernel error logging. Normally true, but when + * probing for feature compatibility this should be passed in as false to + * suppress unnecessary error logging. 
* * This parses a series of Netlink attributes that form a flow key, which must * take the same form accepted by flow_from_nlattrs(), but only enough of it to @@ -998,14 +1104,15 @@ free_newmask: */ int ovs_nla_get_flow_metadata(const struct nlattr *attr, - struct sw_flow_key *key) + struct sw_flow_key *key, + bool log) { const struct nlattr *a[OVS_KEY_ATTR_MAX + 1]; struct sw_flow_match match; u64 attrs = 0; int err; - err = parse_flow_nlattrs(attr, a, &attrs); + err = parse_flow_nlattrs(attr, a, &attrs, log); if (err) return -EINVAL; @@ -1014,7 +1121,7 @@ int ovs_nla_get_flow_metadata(const struct nlattr *attr, key->phy.in_port = DP_MAX_PORTS; - return metadata_from_nlattrs(&match, &attrs, a, false); + return metadata_from_nlattrs(&match, &attrs, a, false, log); } int ovs_nla_put_flow(const struct sw_flow_key *swkey, @@ -1140,6 +1247,14 @@ int ovs_nla_put_flow(const struct sw_flow_key *swkey, arp_key->arp_op = htons(output->ip.proto); ether_addr_copy(arp_key->arp_sha, output->ipv4.arp.sha); ether_addr_copy(arp_key->arp_tha, output->ipv4.arp.tha); + } else if (eth_p_mpls(swkey->eth.type)) { + struct ovs_key_mpls *mpls_key; + + nla = nla_reserve(skb, OVS_KEY_ATTR_MPLS, sizeof(*mpls_key)); + if (!nla) + goto nla_put_failure; + mpls_key = nla_data(nla); + mpls_key->mpls_lse = output->mpls.top_lse; } if ((swkey->eth.type == htons(ETH_P_IP) || @@ -1226,12 +1341,14 @@ nla_put_failure: #define MAX_ACTIONS_BUFSIZE (32 * 1024) -struct sw_flow_actions *ovs_nla_alloc_flow_actions(int size) +static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log) { struct sw_flow_actions *sfa; - if (size > MAX_ACTIONS_BUFSIZE) + if (size > MAX_ACTIONS_BUFSIZE) { + OVS_NLERR(log, "Flow action size %u bytes exceeds max", size); return ERR_PTR(-EINVAL); + } sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL); if (!sfa) @@ -1249,7 +1366,7 @@ void ovs_nla_free_flow_actions(struct sw_flow_actions *sf_acts) } static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, - int attr_len) + int attr_len, bool log) { struct sw_flow_actions *acts; @@ -1269,7 +1386,7 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, new_acts_size = MAX_ACTIONS_BUFSIZE; } - acts = ovs_nla_alloc_flow_actions(new_acts_size); + acts = nla_alloc_flow_actions(new_acts_size, log); if (IS_ERR(acts)) return (void *)acts; @@ -1284,11 +1401,11 @@ out: } static struct nlattr *__add_action(struct sw_flow_actions **sfa, - int attrtype, void *data, int len) + int attrtype, void *data, int len, bool log) { struct nlattr *a; - a = reserve_sfa_size(sfa, nla_attr_size(len)); + a = reserve_sfa_size(sfa, nla_attr_size(len), log); if (IS_ERR(a)) return a; @@ -1303,24 +1420,22 @@ static struct nlattr *__add_action(struct sw_flow_actions **sfa, } static int add_action(struct sw_flow_actions **sfa, int attrtype, - void *data, int len) + void *data, int len, bool log) { struct nlattr *a; - a = __add_action(sfa, attrtype, data, len); - if (IS_ERR(a)) - return PTR_ERR(a); + a = __add_action(sfa, attrtype, data, len, log); - return 0; + return PTR_ERR_OR_ZERO(a); } static inline int add_nested_action_start(struct sw_flow_actions **sfa, - int attrtype) + int attrtype, bool log) { int used = (*sfa)->actions_len; int err; - err = add_action(sfa, attrtype, NULL, 0); + err = add_action(sfa, attrtype, NULL, 0, log); if (err) return err; @@ -1336,9 +1451,15 @@ static inline void add_nested_action_end(struct sw_flow_actions *sfa, a->nla_len = sfa->actions_len - st_offset; } +static int __ovs_nla_copy_actions(const struct nlattr *attr, + 
const struct sw_flow_key *key, + int depth, struct sw_flow_actions **sfa, + __be16 eth_type, __be16 vlan_tci, bool log); + static int validate_and_copy_sample(const struct nlattr *attr, const struct sw_flow_key *key, int depth, - struct sw_flow_actions **sfa) + struct sw_flow_actions **sfa, + __be16 eth_type, __be16 vlan_tci, bool log) { const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1]; const struct nlattr *probability, *actions; @@ -1364,18 +1485,19 @@ static int validate_and_copy_sample(const struct nlattr *attr, return -EINVAL; /* validation done, copy sample action. */ - start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE); + start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SAMPLE, log); if (start < 0) return start; err = add_action(sfa, OVS_SAMPLE_ATTR_PROBABILITY, - nla_data(probability), sizeof(u32)); + nla_data(probability), sizeof(u32), log); if (err) return err; - st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS); + st_acts = add_nested_action_start(sfa, OVS_SAMPLE_ATTR_ACTIONS, log); if (st_acts < 0) return st_acts; - err = ovs_nla_copy_actions(actions, key, depth + 1, sfa); + err = __ovs_nla_copy_actions(actions, key, depth + 1, sfa, + eth_type, vlan_tci, log); if (err) return err; @@ -1385,10 +1507,10 @@ static int validate_and_copy_sample(const struct nlattr *attr, return 0; } -static int validate_tp_port(const struct sw_flow_key *flow_key) +static int validate_tp_port(const struct sw_flow_key *flow_key, + __be16 eth_type) { - if ((flow_key->eth.type == htons(ETH_P_IP) || - flow_key->eth.type == htons(ETH_P_IPV6)) && + if ((eth_type == htons(ETH_P_IP) || eth_type == htons(ETH_P_IPV6)) && (flow_key->tp.src || flow_key->tp.dst)) return 0; @@ -1412,7 +1534,7 @@ void ovs_match_init(struct sw_flow_match *match, } static int validate_and_copy_set_tun(const struct nlattr *attr, - struct sw_flow_actions **sfa) + struct sw_flow_actions **sfa, bool log) { struct sw_flow_match match; struct sw_flow_key key; @@ -1421,7 +1543,7 @@ static int validate_and_copy_set_tun(const struct nlattr *attr, int err, start; ovs_match_init(&match, &key, NULL); - err = ipv4_tun_from_nlattr(nla_data(attr), &match, false); + err = ipv4_tun_from_nlattr(nla_data(attr), &match, false, log); if (err) return err; @@ -1450,12 +1572,12 @@ static int validate_and_copy_set_tun(const struct nlattr *attr, key.tun_key.tun_flags |= crit_opt ? 
TUNNEL_CRIT_OPT : 0; }; - start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET); + start = add_nested_action_start(sfa, OVS_ACTION_ATTR_SET, log); if (start < 0) return start; a = __add_action(sfa, OVS_KEY_ATTR_TUNNEL_INFO, NULL, - sizeof(*tun_info) + key.tun_opts_len); + sizeof(*tun_info) + key.tun_opts_len, log); if (IS_ERR(a)) return PTR_ERR(a); @@ -1483,7 +1605,7 @@ static int validate_and_copy_set_tun(const struct nlattr *attr, static int validate_set(const struct nlattr *a, const struct sw_flow_key *flow_key, struct sw_flow_actions **sfa, - bool *set_tun) + bool *set_tun, __be16 eth_type, bool log) { const struct nlattr *ovs_key = nla_data(a); int key_type = nla_type(ovs_key); @@ -1508,14 +1630,17 @@ static int validate_set(const struct nlattr *a, break; case OVS_KEY_ATTR_TUNNEL: + if (eth_p_mpls(eth_type)) + return -EINVAL; + *set_tun = true; - err = validate_and_copy_set_tun(a, sfa); + err = validate_and_copy_set_tun(a, sfa, log); if (err) return err; break; case OVS_KEY_ATTR_IPV4: - if (flow_key->eth.type != htons(ETH_P_IP)) + if (eth_type != htons(ETH_P_IP)) return -EINVAL; if (!flow_key->ip.proto) @@ -1531,7 +1656,7 @@ static int validate_set(const struct nlattr *a, break; case OVS_KEY_ATTR_IPV6: - if (flow_key->eth.type != htons(ETH_P_IPV6)) + if (eth_type != htons(ETH_P_IPV6)) return -EINVAL; if (!flow_key->ip.proto) @@ -1553,19 +1678,24 @@ static int validate_set(const struct nlattr *a, if (flow_key->ip.proto != IPPROTO_TCP) return -EINVAL; - return validate_tp_port(flow_key); + return validate_tp_port(flow_key, eth_type); case OVS_KEY_ATTR_UDP: if (flow_key->ip.proto != IPPROTO_UDP) return -EINVAL; - return validate_tp_port(flow_key); + return validate_tp_port(flow_key, eth_type); + + case OVS_KEY_ATTR_MPLS: + if (!eth_p_mpls(eth_type)) + return -EINVAL; + break; case OVS_KEY_ATTR_SCTP: if (flow_key->ip.proto != IPPROTO_SCTP) return -EINVAL; - return validate_tp_port(flow_key); + return validate_tp_port(flow_key, eth_type); default: return -EINVAL; @@ -1579,6 +1709,7 @@ static int validate_userspace(const struct nlattr *attr) static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = { [OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 }, [OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC }, + [OVS_USERSPACE_ATTR_EGRESS_TUN_PORT] = {.type = NLA_U32 }, }; struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1]; int error; @@ -1596,12 +1727,12 @@ static int validate_userspace(const struct nlattr *attr) } static int copy_action(const struct nlattr *from, - struct sw_flow_actions **sfa) + struct sw_flow_actions **sfa, bool log) { int totlen = NLA_ALIGN(from->nla_len); struct nlattr *to; - to = reserve_sfa_size(sfa, from->nla_len); + to = reserve_sfa_size(sfa, from->nla_len, log); if (IS_ERR(to)) return PTR_ERR(to); @@ -1609,12 +1740,13 @@ static int copy_action(const struct nlattr *from, return 0; } -int ovs_nla_copy_actions(const struct nlattr *attr, - const struct sw_flow_key *key, - int depth, - struct sw_flow_actions **sfa) +static int __ovs_nla_copy_actions(const struct nlattr *attr, + const struct sw_flow_key *key, + int depth, struct sw_flow_actions **sfa, + __be16 eth_type, __be16 vlan_tci, bool log) { const struct nlattr *a; + bool out_tnl_port = false; int rem, err; if (depth >= SAMPLE_ACTION_DEPTH) @@ -1626,6 +1758,8 @@ int ovs_nla_copy_actions(const struct nlattr *attr, [OVS_ACTION_ATTR_OUTPUT] = sizeof(u32), [OVS_ACTION_ATTR_RECIRC] = sizeof(u32), [OVS_ACTION_ATTR_USERSPACE] = (u32)-1, + [OVS_ACTION_ATTR_PUSH_MPLS] = sizeof(struct ovs_action_push_mpls), + 
[OVS_ACTION_ATTR_POP_MPLS] = sizeof(__be16), [OVS_ACTION_ATTR_PUSH_VLAN] = sizeof(struct ovs_action_push_vlan), [OVS_ACTION_ATTR_POP_VLAN] = 0, [OVS_ACTION_ATTR_SET] = (u32)-1, @@ -1655,6 +1789,8 @@ int ovs_nla_copy_actions(const struct nlattr *attr, case OVS_ACTION_ATTR_OUTPUT: if (nla_get_u32(a) >= DP_MAX_PORTS) return -EINVAL; + out_tnl_port = false; + break; case OVS_ACTION_ATTR_HASH: { @@ -1671,6 +1807,7 @@ int ovs_nla_copy_actions(const struct nlattr *attr, } case OVS_ACTION_ATTR_POP_VLAN: + vlan_tci = htons(0); break; case OVS_ACTION_ATTR_PUSH_VLAN: @@ -1679,29 +1816,77 @@ int ovs_nla_copy_actions(const struct nlattr *attr, return -EINVAL; if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT))) return -EINVAL; + vlan_tci = vlan->vlan_tci; break; case OVS_ACTION_ATTR_RECIRC: break; + case OVS_ACTION_ATTR_PUSH_MPLS: { + const struct ovs_action_push_mpls *mpls = nla_data(a); + + /* The networking stack does not allow simultaneous tunnel + * and MPLS GSO. + */ + if (out_tnl_port) + return -EINVAL; + + if (!eth_p_mpls(mpls->mpls_ethertype)) + return -EINVAL; + /* Prohibit MPLS push except onto a whitelist of EtherTypes, + * i.e. packets that have a known tag order. + */ + if (vlan_tci & htons(VLAN_TAG_PRESENT) || + (eth_type != htons(ETH_P_IP) && + eth_type != htons(ETH_P_IPV6) && + eth_type != htons(ETH_P_ARP) && + eth_type != htons(ETH_P_RARP) && + !eth_p_mpls(eth_type))) + return -EINVAL; + eth_type = mpls->mpls_ethertype; + break; + } + + case OVS_ACTION_ATTR_POP_MPLS: + if (vlan_tci & htons(VLAN_TAG_PRESENT) || + !eth_p_mpls(eth_type)) + return -EINVAL; + + /* Disallow subsequent L2.5+ set and mpls_pop actions, as there + * is no check here to ensure that the new eth_type is valid; + * set actions could otherwise write off the end of the packet + * or corrupt it. + * + * Support for these actions is planned using packet + * recirculation.
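These PUSH_MPLS/POP_MPLS cases are the reason __ovs_nla_copy_actions() now threads eth_type and vlan_tci through the walk: an earlier action changes what a later one may legally do, so validation has to carry state down the list. A toy model of that stateful check (hypothetical action encoding; EtherType constants in host order for brevity, where the kernel compares __be16 values):

#include <stdbool.h>
#include <stdint.h>

enum act { ACT_PUSH_MPLS, ACT_POP_MPLS, ACT_SET_IPV4 };

#define ETH_P_IP	0x0800
#define ETH_P_MPLS_UC	0x8847
#define ETH_P_MPLS_MC	0x8848

static bool is_mpls(uint16_t t)
{
	return t == ETH_P_MPLS_UC || t == ETH_P_MPLS_MC;
}

static bool validate_actions(const enum act *acts, int n, uint16_t eth_type)
{
	for (int i = 0; i < n; i++) {
		switch (acts[i]) {
		case ACT_PUSH_MPLS:
			eth_type = ETH_P_MPLS_UC;	/* frame is MPLS now */
			break;
		case ACT_POP_MPLS:
			if (!is_mpls(eth_type))
				return false;
			eth_type = 0;	/* inner type unknown until recirc */
			break;
		case ACT_SET_IPV4:
			if (eth_type != ETH_P_IP)	/* rejects set-after-push */
				return false;
			break;
		}
	}
	return true;
}

int main(void)
{
	enum act bad[] = { ACT_PUSH_MPLS, ACT_SET_IPV4 };

	return validate_actions(bad, 2, ETH_P_IP) ? 1 : 0;
}

The push-then-set sequence is rejected for the same reason validate_set() earlier in this hunk refuses OVS_KEY_ATTR_IPV4 once eth_type is no longer ETH_P_IP.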
+ */ + eth_type = htons(0); + break; + case OVS_ACTION_ATTR_SET: - err = validate_set(a, key, sfa, &skip_copy); + err = validate_set(a, key, sfa, + &out_tnl_port, eth_type, log); if (err) return err; + + skip_copy = out_tnl_port; break; case OVS_ACTION_ATTR_SAMPLE: - err = validate_and_copy_sample(a, key, depth, sfa); + err = validate_and_copy_sample(a, key, depth, sfa, + eth_type, vlan_tci, log); if (err) return err; skip_copy = true; break; default: + OVS_NLERR(log, "Unknown Action type %d", type); return -EINVAL; } if (!skip_copy) { - err = copy_action(a, sfa); + err = copy_action(a, sfa, log); if (err) return err; } @@ -1713,6 +1898,24 @@ return 0; } +int ovs_nla_copy_actions(const struct nlattr *attr, + const struct sw_flow_key *key, + struct sw_flow_actions **sfa, bool log) +{ + int err; + + *sfa = nla_alloc_flow_actions(nla_len(attr), log); + if (IS_ERR(*sfa)) + return PTR_ERR(*sfa); + + err = __ovs_nla_copy_actions(attr, key, 0, sfa, key->eth.type, + key->eth.tci, log); + if (err) + kfree(*sfa); + + return err; +} + static int sample_action_to_attr(const struct nlattr *attr, struct sk_buff *skb) { const struct nlattr *a; diff --git a/net/openvswitch/flow_netlink.h b/net/openvswitch/flow_netlink.h index 206e45add888..577f12be3459 100644 --- a/net/openvswitch/flow_netlink.h +++ b/net/openvswitch/flow_netlink.h @@ -37,24 +37,28 @@ #include "flow.h" +size_t ovs_tun_key_attr_size(void); +size_t ovs_key_attr_size(void); + void ovs_match_init(struct sw_flow_match *match, struct sw_flow_key *key, struct sw_flow_mask *mask); int ovs_nla_put_flow(const struct sw_flow_key *, const struct sw_flow_key *, struct sk_buff *); -int ovs_nla_get_flow_metadata(const struct nlattr *, struct sw_flow_key *); +int ovs_nla_get_flow_metadata(const struct nlattr *, struct sw_flow_key *, + bool log); -int ovs_nla_get_match(struct sw_flow_match *match, - const struct nlattr *, - const struct nlattr *); +int ovs_nla_get_match(struct sw_flow_match *, const struct nlattr *key, + const struct nlattr *mask, bool log); +int ovs_nla_put_egress_tunnel_key(struct sk_buff *, + const struct ovs_tunnel_info *); int ovs_nla_copy_actions(const struct nlattr *attr, - const struct sw_flow_key *key, int depth, - struct sw_flow_actions **sfa); + const struct sw_flow_key *key, + struct sw_flow_actions **sfa, bool log); int ovs_nla_put_actions(const struct nlattr *attr, int len, struct sk_buff *skb); -struct sw_flow_actions *ovs_nla_alloc_flow_actions(int actions_len); void ovs_nla_free_flow_actions(struct sw_flow_actions *); #endif /* flow_netlink.h */ diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c index cf2d853646f0..e0a7fefc1edf 100644 --- a/net/openvswitch/flow_table.c +++ b/net/openvswitch/flow_table.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2007-2013 Nicira, Inc. + * Copyright (c) 2007-2014 Nicira, Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public @@ -107,7 +107,7 @@ err: return ERR_PTR(-ENOMEM); } -int ovs_flow_tbl_count(struct flow_table *table) +int ovs_flow_tbl_count(const struct flow_table *table) { return table->count; } @@ -250,11 +250,14 @@ skip_flows: __table_instance_destroy(ti); } -void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred) +/* No need for locking; this function is called from an RCU callback or + * error path.
+ */ +void ovs_flow_tbl_destroy(struct flow_table *table) { - struct table_instance *ti = ovsl_dereference(table->ti); + struct table_instance *ti = rcu_dereference_raw(table->ti); - table_instance_destroy(ti, deferred); + table_instance_destroy(ti, false); } struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti, @@ -398,7 +401,7 @@ static bool flow_cmp_masked_key(const struct sw_flow *flow, } bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, - struct sw_flow_match *match) + const struct sw_flow_match *match) { struct sw_flow_key *key = match->key; int key_start = flow_key_start(key); @@ -409,7 +412,7 @@ bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, static struct sw_flow *masked_flow_lookup(struct table_instance *ti, const struct sw_flow_key *unmasked, - struct sw_flow_mask *mask) + const struct sw_flow_mask *mask) { struct sw_flow *flow; struct hlist_head *head; @@ -457,7 +460,7 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl, } struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl, - struct sw_flow_match *match) + const struct sw_flow_match *match) { struct table_instance *ti = rcu_dereference_ovsl(tbl->ti); struct sw_flow_mask *mask; @@ -560,7 +563,7 @@ static struct sw_flow_mask *flow_mask_find(const struct flow_table *tbl, /* Add 'mask' into the mask list, if it is not already there. */ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow, - struct sw_flow_mask *new) + const struct sw_flow_mask *new) { struct sw_flow_mask *mask; mask = flow_mask_find(tbl, new); @@ -583,7 +586,7 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow, /* Must be called with OVS mutex held. */ int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, - struct sw_flow_mask *mask) + const struct sw_flow_mask *mask) { struct table_instance *new_ti = NULL; struct table_instance *ti; diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h index 5918bff7f3f6..309fa6415689 100644 --- a/net/openvswitch/flow_table.h +++ b/net/openvswitch/flow_table.h @@ -61,12 +61,12 @@ struct sw_flow *ovs_flow_alloc(void); void ovs_flow_free(struct sw_flow *, bool deferred); int ovs_flow_tbl_init(struct flow_table *); -int ovs_flow_tbl_count(struct flow_table *table); -void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred); +int ovs_flow_tbl_count(const struct flow_table *table); +void ovs_flow_tbl_destroy(struct flow_table *table); int ovs_flow_tbl_flush(struct flow_table *flow_table); int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow, - struct sw_flow_mask *mask); + const struct sw_flow_mask *mask); void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow); int ovs_flow_tbl_num_masks(const struct flow_table *table); struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table, @@ -77,9 +77,9 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *, struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *, const struct sw_flow_key *); struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl, - struct sw_flow_match *match); + const struct sw_flow_match *match); bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, - struct sw_flow_match *match); + const struct sw_flow_match *match); void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, const struct sw_flow_mask *mask); diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c index 106a9d80b663..347fa2325b22 100644 --- 
a/net/openvswitch/vport-geneve.c +++ b/net/openvswitch/vport-geneve.c @@ -17,6 +17,7 @@ #include <linux/rculist.h> #include <linux/udp.h> #include <linux/if_vlan.h> +#include <linux/module.h> #include <net/geneve.h> #include <net/icmp.h> @@ -28,6 +29,8 @@ #include "datapath.h" #include "vport.h" +static struct vport_ops ovs_geneve_vport_ops; + /** * struct geneve_port - Keeps track of open UDP ports * @gs: The socket created for this port number. @@ -65,7 +68,7 @@ static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni) } /* Convert 24 bit VNI to 64 bit tunnel ID. */ -static __be64 vni_to_tunnel_id(__u8 *vni) +static __be64 vni_to_tunnel_id(const __u8 *vni) { #ifdef __BIG_ENDIAN return (vni[0] << 16) | (vni[1] << 8) | vni[2]; @@ -94,7 +97,9 @@ static void geneve_rcv(struct geneve_sock *gs, struct sk_buff *skb) key = vni_to_tunnel_id(geneveh->vni); - ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), key, flags, + ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), + udp_hdr(skb)->source, udp_hdr(skb)->dest, + key, flags, geneveh->options, opts_len); ovs_vport_receive(vport, skb, &tun_info); @@ -225,11 +230,46 @@ static const char *geneve_get_name(const struct vport *vport) return geneve_port->name; } -const struct vport_ops ovs_geneve_vport_ops = { +static int geneve_get_egress_tun_info(struct vport *vport, struct sk_buff *skb, + struct ovs_tunnel_info *egress_tun_info) +{ + struct geneve_port *geneve_port = geneve_vport(vport); + struct net *net = ovs_dp_get_net(vport->dp); + __be16 dport = inet_sk(geneve_port->gs->sock->sk)->inet_sport; + __be16 sport = udp_flow_src_port(net, skb, 1, USHRT_MAX, true); + + /* Get tp_src and tp_dst; refer to geneve_build_header(). + */ + return ovs_tunnel_get_egress_info(egress_tun_info, + ovs_dp_get_net(vport->dp), + OVS_CB(skb)->egress_tun_info, + IPPROTO_UDP, skb->mark, sport, dport); +} + +static struct vport_ops ovs_geneve_vport_ops = { .type = OVS_VPORT_TYPE_GENEVE, .create = geneve_tnl_create, .destroy = geneve_tnl_destroy, .get_name = geneve_get_name, .get_options = geneve_get_options, .send = geneve_tnl_send, + .owner = THIS_MODULE, + .get_egress_tun_info = geneve_get_egress_tun_info, }; + +static int __init ovs_geneve_tnl_init(void) +{ + return ovs_vport_ops_register(&ovs_geneve_vport_ops); +} + +static void __exit ovs_geneve_tnl_exit(void) +{ + ovs_vport_ops_unregister(&ovs_geneve_vport_ops); +} + +module_init(ovs_geneve_tnl_init); +module_exit(ovs_geneve_tnl_exit); + +MODULE_DESCRIPTION("OVS: Geneve switching port"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("vport-type-5"); diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c index 108b82da2fd9..6b69df545b1d 100644 --- a/net/openvswitch/vport-gre.c +++ b/net/openvswitch/vport-gre.c @@ -29,6 +29,7 @@ #include <linux/jhash.h> #include <linux/list.h> #include <linux/kernel.h> +#include <linux/module.h> #include <linux/workqueue.h> #include <linux/rculist.h> #include <net/route.h> @@ -45,6 +46,8 @@ #include "datapath.h" #include "vport.h" +static struct vport_ops ovs_gre_vport_ops; + /* Returns the least-significant 32 bits of a __be64.
*/ static __be32 be64_get_low32(__be64 x) { @@ -105,7 +108,7 @@ static int gre_rcv(struct sk_buff *skb, return PACKET_REJECT; key = key_to_tunnel_id(tpi->key, tpi->seq); - ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), key, + ovs_flow_tun_info_init(&tun_info, ip_hdr(skb), 0, 0, key, filter_tnl_flags(tpi->flags), NULL, 0); ovs_vport_receive(vport, skb, &tun_info); @@ -172,14 +175,10 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb) goto err_free_rt; } - if (vlan_tx_tag_present(skb)) { - if (unlikely(!__vlan_put_tag(skb, - skb->vlan_proto, - vlan_tx_tag_get(skb)))) { - err = -ENOMEM; - goto err_free_rt; - } - skb->vlan_tci = 0; + skb = vlan_hwaccel_push_inside(skb); + if (unlikely(!skb)) { + err = -ENOMEM; + goto err_free_rt; } /* Push Tunnel header. */ @@ -281,10 +280,38 @@ static void gre_tnl_destroy(struct vport *vport) gre_exit(); } -const struct vport_ops ovs_gre_vport_ops = { +static int gre_get_egress_tun_info(struct vport *vport, struct sk_buff *skb, + struct ovs_tunnel_info *egress_tun_info) +{ + return ovs_tunnel_get_egress_info(egress_tun_info, + ovs_dp_get_net(vport->dp), + OVS_CB(skb)->egress_tun_info, + IPPROTO_GRE, skb->mark, 0, 0); +} + +static struct vport_ops ovs_gre_vport_ops = { .type = OVS_VPORT_TYPE_GRE, .create = gre_create, .destroy = gre_tnl_destroy, .get_name = gre_get_name, .send = gre_tnl_send, + .get_egress_tun_info = gre_get_egress_tun_info, + .owner = THIS_MODULE, }; + +static int __init ovs_gre_tnl_init(void) +{ + return ovs_vport_ops_register(&ovs_gre_vport_ops); +} + +static void __exit ovs_gre_tnl_exit(void) +{ + ovs_vport_ops_unregister(&ovs_gre_vport_ops); +} + +module_init(ovs_gre_tnl_init); +module_exit(ovs_gre_tnl_exit); + +MODULE_DESCRIPTION("OVS: GRE switching port"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("vport-type-3"); diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c index 84516126e5f3..6a55f7105505 100644 --- a/net/openvswitch/vport-internal_dev.c +++ b/net/openvswitch/vport-internal_dev.c @@ -36,6 +36,8 @@ struct internal_dev { struct vport *vport; }; +static struct vport_ops ovs_internal_vport_ops; + static struct internal_dev *internal_dev_priv(struct net_device *netdev) { return netdev_priv(netdev); @@ -222,6 +224,11 @@ static int internal_dev_recv(struct vport *vport, struct sk_buff *skb) struct net_device *netdev = netdev_vport_priv(vport)->dev; int len; + if (unlikely(!(netdev->flags & IFF_UP))) { + kfree_skb(skb); + return 0; + } + len = skb->len; skb_dst_drop(skb); @@ -238,7 +245,7 @@ static int internal_dev_recv(struct vport *vport, struct sk_buff *skb) return len; } -const struct vport_ops ovs_internal_vport_ops = { +static struct vport_ops ovs_internal_vport_ops = { .type = OVS_VPORT_TYPE_INTERNAL, .create = internal_dev_create, .destroy = internal_dev_destroy, @@ -261,10 +268,21 @@ struct vport *ovs_internal_dev_get_vport(struct net_device *netdev) int ovs_internal_dev_rtnl_link_register(void) { - return rtnl_link_register(&internal_dev_link_ops); + int err; + + err = rtnl_link_register(&internal_dev_link_ops); + if (err < 0) + return err; + + err = ovs_vport_ops_register(&ovs_internal_vport_ops); + if (err < 0) + rtnl_link_unregister(&internal_dev_link_ops); + + return err; } void ovs_internal_dev_rtnl_link_unregister(void) { + ovs_vport_ops_unregister(&ovs_internal_vport_ops); rtnl_link_unregister(&internal_dev_link_ops); } diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index d21f77d875ba..4776282c6417 100644 --- 
a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c @@ -33,6 +33,8 @@ #include "vport-internal_dev.h" #include "vport-netdev.h" +static struct vport_ops ovs_netdev_vport_ops; + /* Must be called with rcu_read_lock. */ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb) { @@ -75,7 +77,7 @@ static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb) return RX_HANDLER_CONSUMED; } -static struct net_device *get_dpdev(struct datapath *dp) +static struct net_device *get_dpdev(const struct datapath *dp) { struct vport *local; @@ -224,10 +226,20 @@ struct vport *ovs_netdev_get_vport(struct net_device *dev) return NULL; } -const struct vport_ops ovs_netdev_vport_ops = { +static struct vport_ops ovs_netdev_vport_ops = { .type = OVS_VPORT_TYPE_NETDEV, .create = netdev_create, .destroy = netdev_destroy, .get_name = ovs_netdev_get_name, .send = netdev_send, }; + +int __init ovs_netdev_init(void) +{ + return ovs_vport_ops_register(&ovs_netdev_vport_ops); +} + +void ovs_netdev_exit(void) +{ + ovs_vport_ops_unregister(&ovs_netdev_vport_ops); +} diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h index 8df01c1127e5..6f7038e79c52 100644 --- a/net/openvswitch/vport-netdev.h +++ b/net/openvswitch/vport-netdev.h @@ -41,4 +41,7 @@ netdev_vport_priv(const struct vport *vport) const char *ovs_netdev_get_name(const struct vport *); void ovs_netdev_detach_dev(struct vport *); +int __init ovs_netdev_init(void); +void ovs_netdev_exit(void); + #endif /* vport_netdev.h */ diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c index 2735e01dca73..38f95a52241b 100644 --- a/net/openvswitch/vport-vxlan.c +++ b/net/openvswitch/vport-vxlan.c @@ -24,6 +24,7 @@ #include <linux/net.h> #include <linux/rculist.h> #include <linux/udp.h> +#include <linux/module.h> #include <net/icmp.h> #include <net/ip.h> @@ -50,6 +51,8 @@ struct vxlan_port { char name[IFNAMSIZ]; }; +static struct vport_ops ovs_vxlan_vport_ops; + static inline struct vxlan_port *vxlan_vport(const struct vport *vport) { return vport_priv(vport); @@ -66,7 +69,9 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, __be32 vx_vni) /* Save outer tunnel values */ iph = ip_hdr(skb); key = cpu_to_be64(ntohl(vx_vni) >> 8); - ovs_flow_tun_info_init(&tun_info, iph, key, TUNNEL_KEY, NULL, 0); + ovs_flow_tun_info_init(&tun_info, iph, + udp_hdr(skb)->source, udp_hdr(skb)->dest, + key, TUNNEL_KEY, NULL, 0); ovs_vport_receive(vport, skb, &tun_info); } @@ -186,17 +191,55 @@ error: return err; } +static int vxlan_get_egress_tun_info(struct vport *vport, struct sk_buff *skb, + struct ovs_tunnel_info *egress_tun_info) +{ + struct net *net = ovs_dp_get_net(vport->dp); + struct vxlan_port *vxlan_port = vxlan_vport(vport); + __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport; + __be16 src_port; + int port_min; + int port_max; + + inet_get_local_port_range(net, &port_min, &port_max); + src_port = udp_flow_src_port(net, skb, 0, 0, true); + + return ovs_tunnel_get_egress_info(egress_tun_info, net, + OVS_CB(skb)->egress_tun_info, + IPPROTO_UDP, skb->mark, + src_port, dst_port); +} + static const char *vxlan_get_name(const struct vport *vport) { struct vxlan_port *vxlan_port = vxlan_vport(vport); return vxlan_port->name; } -const struct vport_ops ovs_vxlan_vport_ops = { +static struct vport_ops ovs_vxlan_vport_ops = { .type = OVS_VPORT_TYPE_VXLAN, .create = vxlan_tnl_create, .destroy = vxlan_tnl_destroy, .get_name = vxlan_get_name, .get_options = vxlan_get_options, .send = 
vxlan_tnl_send, + .get_egress_tun_info = vxlan_get_egress_tun_info, + .owner = THIS_MODULE, }; + +static int __init ovs_vxlan_tnl_init(void) +{ + return ovs_vport_ops_register(&ovs_vxlan_vport_ops); +} + +static void __exit ovs_vxlan_tnl_exit(void) +{ + ovs_vport_ops_unregister(&ovs_vxlan_vport_ops); +} + +module_init(ovs_vxlan_tnl_init); +module_exit(ovs_vxlan_tnl_exit); + +MODULE_DESCRIPTION("OVS: VXLAN switching port"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("vport-type-4"); diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index 6015802ebe6f..e771a46933e5 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c @@ -28,6 +28,7 @@ #include <linux/rtnetlink.h> #include <linux/compat.h> #include <net/net_namespace.h> +#include <linux/module.h> #include "datapath.h" #include "vport.h" @@ -36,22 +37,7 @@ static void ovs_vport_record_error(struct vport *, enum vport_err_type err_type); -/* List of statically compiled vport implementations. Don't forget to also - * add yours to the list at the bottom of vport.h. */ -static const struct vport_ops *vport_ops_list[] = { - &ovs_netdev_vport_ops, - &ovs_internal_vport_ops, - -#ifdef CONFIG_OPENVSWITCH_GRE - &ovs_gre_vport_ops, -#endif -#ifdef CONFIG_OPENVSWITCH_VXLAN - &ovs_vxlan_vport_ops, -#endif -#ifdef CONFIG_OPENVSWITCH_GENEVE - &ovs_geneve_vport_ops, -#endif -}; +static LIST_HEAD(vport_ops_list); /* Protected by RCU read lock for reading, ovs_mutex for writing. */ static struct hlist_head *dev_table; @@ -82,12 +68,38 @@ void ovs_vport_exit(void) kfree(dev_table); } -static struct hlist_head *hash_bucket(struct net *net, const char *name) +static struct hlist_head *hash_bucket(const struct net *net, const char *name) { unsigned int hash = jhash(name, strlen(name), (unsigned long) net); return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)]; } +int ovs_vport_ops_register(struct vport_ops *ops) +{ + int err = -EEXIST; + struct vport_ops *o; + + ovs_lock(); + list_for_each_entry(o, &vport_ops_list, list) + if (ops->type == o->type) + goto errout; + + list_add_tail(&ops->list, &vport_ops_list); + err = 0; +errout: + ovs_unlock(); + return err; +} +EXPORT_SYMBOL_GPL(ovs_vport_ops_register); + +void ovs_vport_ops_unregister(struct vport_ops *ops) +{ + ovs_lock(); + list_del(&ops->list); + ovs_unlock(); +} +EXPORT_SYMBOL_GPL(ovs_vport_ops_unregister); + /** * ovs_vport_locate - find a port that has already been created * @@ -95,7 +107,7 @@ static struct hlist_head *hash_bucket(struct net *net, const char *name) * * Must be called with ovs or RCU read lock. 
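With the tunnel vports above now built as modules, every implementation registers through ovs_vport_ops_register() instead of appearing in a static table. A minimal sketch of what an out-of-tree vport module would look like under the new API; the type value, the names, and the stub bodies are hypothetical:

/* Hypothetical vport module using the new registration API. */
#include <linux/module.h>
#include "vport.h"

static struct vport *example_create(const struct vport_parms *parms)
{
	return ERR_PTR(-EOPNOTSUPP);	/* stub */
}

static void example_destroy(struct vport *vport)
{
}

static const char *example_get_name(const struct vport *vport)
{
	return "example0";		/* stub */
}

static int example_send(struct vport *vport, struct sk_buff *skb)
{
	kfree_skb(skb);			/* stub: drop everything */
	return 0;
}

static struct vport_ops ovs_example_vport_ops = {
	.type		= 42,	/* hypothetical OVS_VPORT_TYPE_* value */
	.create		= example_create,
	.destroy	= example_destroy,
	.get_name	= example_get_name,
	.send		= example_send,
	.owner		= THIS_MODULE,
};

static int __init ovs_example_init(void)
{
	return ovs_vport_ops_register(&ovs_example_vport_ops);
}

static void __exit ovs_example_exit(void)
{
	ovs_vport_ops_unregister(&ovs_example_vport_ops);
}

module_init(ovs_example_init);
module_exit(ovs_example_exit);
MODULE_LICENSE("GPL");
/* Lets ovs_vport_add() autoload this module via request_module(). */
MODULE_ALIAS("vport-type-42");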
*/ -struct vport *ovs_vport_locate(struct net *net, const char *name) +struct vport *ovs_vport_locate(const struct net *net, const char *name) { struct hlist_head *bucket = hash_bucket(net, name); struct vport *vport; @@ -153,6 +165,7 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops, return vport; } +EXPORT_SYMBOL_GPL(ovs_vport_alloc); /** * ovs_vport_free - uninitialize and free vport @@ -173,6 +186,18 @@ void ovs_vport_free(struct vport *vport) free_percpu(vport->percpu_stats); kfree(vport); } +EXPORT_SYMBOL_GPL(ovs_vport_free); + +static struct vport_ops *ovs_vport_lookup(const struct vport_parms *parms) +{ + struct vport_ops *ops; + + list_for_each_entry(ops, &vport_ops_list, list) + if (ops->type == parms->type) + return ops; + + return NULL; +} /** * ovs_vport_add - add vport device (for kernel callers) @@ -184,31 +209,40 @@ void ovs_vport_free(struct vport *vport) */ struct vport *ovs_vport_add(const struct vport_parms *parms) { + struct vport_ops *ops; struct vport *vport; - int err = 0; - int i; - for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) { - if (vport_ops_list[i]->type == parms->type) { - struct hlist_head *bucket; + ops = ovs_vport_lookup(parms); + if (ops) { + struct hlist_head *bucket; - vport = vport_ops_list[i]->create(parms); - if (IS_ERR(vport)) { - err = PTR_ERR(vport); - goto out; - } + if (!try_module_get(ops->owner)) + return ERR_PTR(-EAFNOSUPPORT); - bucket = hash_bucket(ovs_dp_get_net(vport->dp), - vport->ops->get_name(vport)); - hlist_add_head_rcu(&vport->hash_node, bucket); + vport = ops->create(parms); + if (IS_ERR(vport)) { + module_put(ops->owner); return vport; } + + bucket = hash_bucket(ovs_dp_get_net(vport->dp), + vport->ops->get_name(vport)); + hlist_add_head_rcu(&vport->hash_node, bucket); + return vport; } - err = -EAFNOSUPPORT; + /* Unlock to attempt the module load, and return -EAGAIN if the + * load succeeded, as the port addition workflow must then be + * restarted. + */ + ovs_unlock(); + request_module("vport-type-%d", parms->type); + ovs_lock(); -out: - return ERR_PTR(err); + if (!ovs_vport_lookup(parms)) + return ERR_PTR(-EAFNOSUPPORT); + else + return ERR_PTR(-EAGAIN); } /** @@ -242,6 +276,8 @@ void ovs_vport_del(struct vport *vport) hlist_del_rcu(&vport->hash_node); vport->ops->destroy(vport); + + module_put(vport->ops->owner); } /** @@ -344,7 +380,7 @@ int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb) * * Must be called with ovs_mutex. */ -int ovs_vport_set_upcall_portids(struct vport *vport, struct nlattr *ids) +int ovs_vport_set_upcall_portids(struct vport *vport, const struct nlattr *ids) { struct vport_portids *old, *vport_portids; @@ -435,7 +471,7 @@ u32 ovs_vport_find_upcall_portid(const struct vport *vport, struct sk_buff *skb) * skb->data should point to the Ethernet header.
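The comment above defines the retry contract: after request_module() succeeds, the caller sees -EAGAIN and must restart the port addition. An illustrative caller-side wrapper (the real retry lives in the datapath code, which is not shown here):

/* Illustrative only: retry once after ovs_vport_add() autoloads the
 * "vport-type-N" module and asks for a restart with -EAGAIN. */
static struct vport *example_vport_add_autoload(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = ovs_vport_add(parms);
	if (IS_ERR(vport) && PTR_ERR(vport) == -EAGAIN)
		vport = ovs_vport_add(parms);	/* ops are registered now */

	return vport;
}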
void ovs_vport_receive(struct vport *vport, struct sk_buff *skb, - struct ovs_tunnel_info *tun_info) + const struct ovs_tunnel_info *tun_info) { struct pcpu_sw_netstats *stats; struct sw_flow_key key; @@ -457,6 +493,7 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb, } ovs_dp_process_packet(skb, &key); } +EXPORT_SYMBOL_GPL(ovs_vport_receive); /** * ovs_vport_send - send a packet on a device @@ -535,3 +572,65 @@ void ovs_vport_deferred_free(struct vport *vport) call_rcu(&vport->rcu, free_vport_rcu); } +EXPORT_SYMBOL_GPL(ovs_vport_deferred_free); + +int ovs_tunnel_get_egress_info(struct ovs_tunnel_info *egress_tun_info, + struct net *net, + const struct ovs_tunnel_info *tun_info, + u8 ipproto, + u32 skb_mark, + __be16 tp_src, + __be16 tp_dst) +{ + const struct ovs_key_ipv4_tunnel *tun_key; + struct rtable *rt; + struct flowi4 fl; + + if (unlikely(!tun_info)) + return -EINVAL; + + tun_key = &tun_info->tunnel; + + /* Route lookup to get the source IP address. + * This may need to be changed if the corresponding logic in the + * vport ops changes. + */ + memset(&fl, 0, sizeof(fl)); + fl.daddr = tun_key->ipv4_dst; + fl.saddr = tun_key->ipv4_src; + fl.flowi4_tos = RT_TOS(tun_key->ipv4_tos); + fl.flowi4_mark = skb_mark; + fl.flowi4_proto = ipproto; + + rt = ip_route_output_key(net, &fl); + if (IS_ERR(rt)) + return PTR_ERR(rt); + + ip_rt_put(rt); + + /* Generate egress_tun_info based on tun_info, + * saddr, tp_src and tp_dst + */ + __ovs_flow_tun_info_init(egress_tun_info, + fl.saddr, tun_key->ipv4_dst, + tun_key->ipv4_tos, + tun_key->ipv4_ttl, + tp_src, tp_dst, + tun_key->tun_id, + tun_key->tun_flags, + tun_info->options, + tun_info->options_len); + + return 0; +} +EXPORT_SYMBOL_GPL(ovs_tunnel_get_egress_info); + +int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb, + struct ovs_tunnel_info *info) +{ + /* get_egress_tun_info() is only implemented on tunnel ports.
*/ + if (unlikely(!vport->ops->get_egress_tun_info)) + return -EINVAL; + + return vport->ops->get_egress_tun_info(vport, skb, info); +} diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h index 8942125de3a6..99c8e71d9e6c 100644 --- a/net/openvswitch/vport.h +++ b/net/openvswitch/vport.h @@ -45,19 +45,29 @@ void ovs_vport_exit(void); struct vport *ovs_vport_add(const struct vport_parms *); void ovs_vport_del(struct vport *); -struct vport *ovs_vport_locate(struct net *net, const char *name); +struct vport *ovs_vport_locate(const struct net *net, const char *name); void ovs_vport_get_stats(struct vport *, struct ovs_vport_stats *); int ovs_vport_set_options(struct vport *, struct nlattr *options); int ovs_vport_get_options(const struct vport *, struct sk_buff *); -int ovs_vport_set_upcall_portids(struct vport *, struct nlattr *pids); +int ovs_vport_set_upcall_portids(struct vport *, const struct nlattr *pids); int ovs_vport_get_upcall_portids(const struct vport *, struct sk_buff *); u32 ovs_vport_find_upcall_portid(const struct vport *, struct sk_buff *); int ovs_vport_send(struct vport *, struct sk_buff *); +int ovs_tunnel_get_egress_info(struct ovs_tunnel_info *egress_tun_info, + struct net *net, + const struct ovs_tunnel_info *tun_info, + u8 ipproto, + u32 skb_mark, + __be16 tp_src, + __be16 tp_dst); +int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb, + struct ovs_tunnel_info *info); + /* The following definitions are for implementers of vport devices: */ struct vport_err_stats { @@ -146,6 +156,8 @@ struct vport_parms { * @get_name: Get the device's name. * @send: Send a packet on the device. Returns the length of the packet sent, * zero for dropped packets or negative for error. + * @get_egress_tun_info: Get the egress tunnel 5-tuple and other info for + * a packet. */ struct vport_ops { enum ovs_vport_type type; @@ -161,6 +173,11 @@ struct vport_ops { const char *(*get_name)(const struct vport *); int (*send)(struct vport *, struct sk_buff *); + int (*get_egress_tun_info)(struct vport *, struct sk_buff *, + struct ovs_tunnel_info *); + + struct module *owner; + struct list_head list; }; enum vport_err_type { @@ -207,15 +224,7 @@ static inline struct vport *vport_from_priv(void *priv) } void ovs_vport_receive(struct vport *, struct sk_buff *, - struct ovs_tunnel_info *); - -/* List of statically compiled vport implementations. Don't forget to also - * add yours to the list at the top of vport.c. 
*/ -extern const struct vport_ops ovs_netdev_vport_ops; -extern const struct vport_ops ovs_internal_vport_ops; -extern const struct vport_ops ovs_gre_vport_ops; -extern const struct vport_ops ovs_vxlan_vport_ops; -extern const struct vport_ops ovs_geneve_vport_ops; + const struct ovs_tunnel_info *); static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len) @@ -224,4 +233,7 @@ static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb, skb->csum = csum_add(skb->csum, csum_partial(start, len, 0)); } +int ovs_vport_ops_register(struct vport_ops *ops); +void ovs_vport_ops_unregister(struct vport_ops *ops); + #endif /* vport.h */ diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 87d20f48ff06..58af58026dd2 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2095,6 +2095,18 @@ static void tpacket_destruct_skb(struct sk_buff *skb) sock_wfree(skb); } +static bool ll_header_truncated(const struct net_device *dev, int len) +{ + /* net device doesn't like empty head */ + if (unlikely(len <= dev->hard_header_len)) { + net_warn_ratelimited("%s: packet size is too short (%d < %d)\n", + current->comm, len, dev->hard_header_len); + return true; + } + + return false; +} + static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, void *frame, struct net_device *dev, int size_max, __be16 proto, unsigned char *addr, int hlen) @@ -2170,12 +2182,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, if (unlikely(err < 0)) return -EINVAL; } else if (dev->hard_header_len) { - /* net device doesn't like empty head */ - if (unlikely(tp_len <= dev->hard_header_len)) { - pr_err("packet size is too short (%d < %d)\n", - tp_len, dev->hard_header_len); + if (ll_header_truncated(dev, tp_len)) return -EINVAL; - } skb_push(skb, dev->hard_header_len); err = skb_store_bits(skb, 0, data, @@ -2500,9 +2508,14 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) skb_set_network_header(skb, reserve); err = -EINVAL; - if (sock->type == SOCK_DGRAM && - (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0) - goto out_free; + if (sock->type == SOCK_DGRAM) { + offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); + if (unlikely(offset < 0)) + goto out_free; + } else { + if (ll_header_truncated(dev, len)) + goto out_free; + } /* Returns -EFAULT on error */ err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len); @@ -2953,7 +2966,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, msg->msg_flags |= MSG_TRUNC; } - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto out_free; diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c index 5a940dbd74a3..32ab87d34828 100644 --- a/net/phonet/af_phonet.c +++ b/net/phonet/af_phonet.c @@ -426,16 +426,17 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev, out_dev = phonet_route_output(net, pn_sockaddr_get_addr(&sa)); if (!out_dev) { - LIMIT_NETDEBUG(KERN_WARNING"No Phonet route to %02X\n", - pn_sockaddr_get_addr(&sa)); + net_dbg_ratelimited("No Phonet route to %02X\n", + pn_sockaddr_get_addr(&sa)); goto out; } __skb_push(skb, sizeof(struct phonethdr)); skb->dev = out_dev; if (out_dev == dev) { - LIMIT_NETDEBUG(KERN_ERR"Phonet loop to %02X on %s\n", - pn_sockaddr_get_addr(&sa), dev->name); + net_dbg_ratelimited("Phonet loop to %02X on %s\n", + pn_sockaddr_get_addr(&sa), + dev->name); goto out_dev; }
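Viewed from user space, the ll_header_truncated() check above turns a formerly silent mis-send into an early failure. A hedged user-space illustration; interface selection and error handling are trimmed, and the function name is ours:

/* User-space sketch: a SOCK_RAW send of ETH_HLEN bytes or fewer is
 * now refused with EINVAL by ll_header_truncated() instead of
 * producing a headerless frame. */
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <string.h>
#include <sys/socket.h>

int example_short_raw_send(int ifindex)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	struct sockaddr_ll sll = {
		.sll_family  = AF_PACKET,
		.sll_ifindex = ifindex,
	};
	char frame[ETH_HLEN];	/* exactly the Ethernet header, no payload */

	memset(frame, 0, sizeof(frame));
	/* len <= dev->hard_header_len, so sendto() fails with EINVAL */
	return sendto(fd, frame, sizeof(frame), 0,
		      (struct sockaddr *)&sll, sizeof(sll));
}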
/* Some drivers (e.g. TUN) do not allocate HW header space */ diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c index 290352c0e6b4..0918bc21eae6 100644 --- a/net/phonet/datagram.c +++ b/net/phonet/datagram.c @@ -150,7 +150,7 @@ static int pn_recvmsg(struct kiocb *iocb, struct sock *sk, copylen = len; } - rval = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copylen); + rval = skb_copy_datagram_msg(skb, 0, msg, copylen); if (rval) { rval = -EFAULT; goto out; diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c index e9a83a637185..fa8237fdc57b 100644 --- a/net/phonet/pep-gprs.c +++ b/net/phonet/pep-gprs.c @@ -203,8 +203,7 @@ static netdev_tx_t gprs_xmit(struct sk_buff *skb, struct net_device *dev) len = skb->len; err = pep_write(sk, skb); if (err) { - LIMIT_NETDEBUG(KERN_WARNING"%s: TX error (%d)\n", - dev->name, err); + net_dbg_ratelimited("%s: TX error (%d)\n", dev->name, err); dev->stats.tx_aborted_errors++; dev->stats.tx_errors++; } else { diff --git a/net/phonet/pep.c b/net/phonet/pep.c index 70a547ea5177..9cd069dfaf65 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c @@ -272,8 +272,8 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) hdr = pnp_hdr(skb); if (hdr->data[0] != PN_PEP_TYPE_COMMON) { - LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n", - (unsigned int)hdr->data[0]); + net_dbg_ratelimited("Phonet unknown PEP type: %u\n", + (unsigned int)hdr->data[0]); return -EOPNOTSUPP; } @@ -304,8 +304,8 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) break; default: - LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP indication: %u\n", - (unsigned int)hdr->data[1]); + net_dbg_ratelimited("Phonet unknown PEP indication: %u\n", + (unsigned int)hdr->data[1]); return -EOPNOTSUPP; } if (wake) @@ -451,8 +451,8 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb) break; default: - LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP message: %u\n", - hdr->message_id); + net_dbg_ratelimited("Phonet unknown PEP message: %u\n", + hdr->message_id); err = -EINVAL; } out: @@ -1296,7 +1296,7 @@ copy: else len = skb->len; - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len); + err = skb_copy_datagram_msg(skb, 0, msg, len); if (!err) err = (flags & MSG_TRUNC) ? skb->len : len; diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index a85c1a086ae4..9b600c20a7a3 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -1249,7 +1249,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, msg->msg_flags |= MSG_TRUNC; } - skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + skb_copy_datagram_msg(skb, 0, msg, copied); if (msg->msg_name) { struct sockaddr_rose *srose; diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c index e9aaa65c0778..4575485ad1b4 100644 --- a/net/rxrpc/ar-recvmsg.c +++ b/net/rxrpc/ar-recvmsg.c @@ -180,7 +180,7 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock, if (copy > len - copied) copy = len - copied; - ret = skb_copy_datagram_iovec(skb, offset, msg->msg_iov, copy); + ret = skb_copy_datagram_msg(skb, offset, msg, copy); if (ret < 0) goto copy_error; diff --git a/net/sched/Kconfig b/net/sched/Kconfig index a1a8e29e5fc9..88618f8b794c 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -686,6 +686,17 @@ config NET_ACT_CSUM To compile this code as a module, choose M here: the module will be called act_csum. +config NET_ACT_VLAN + tristate "Vlan manipulation" + depends on NET_CLS_ACT + ---help--- + Say Y here to push or pop vlan headers. + + If unsure, say N. 
+ + To compile this code as a module, choose M here: the + module will be called act_vlan. + config NET_CLS_IND bool "Incoming device classification" depends on NET_CLS_U32 || NET_CLS_FW diff --git a/net/sched/Makefile b/net/sched/Makefile index 0a869a11f3e6..679f24ae7f93 100644 --- a/net/sched/Makefile +++ b/net/sched/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit.o obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o obj-$(CONFIG_NET_ACT_SKBEDIT) += act_skbedit.o obj-$(CONFIG_NET_ACT_CSUM) += act_csum.o +obj-$(CONFIG_NET_ACT_VLAN) += act_vlan.o obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index d6bcbd9f7791..7fffc2272701 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -1,5 +1,5 @@ /* - * net/sched/gact.c Generic actions + * net/sched/act_gact.c Generic actions * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 8a64a0734aee..cbc8dd7dd48a 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -1,5 +1,5 @@ /* - * net/sched/ipt.c iptables target interface + * net/sched/act_ipt.c iptables target interface * *TODO: Add other tables. For now we only support the ipv4 table targets * diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c index eb48306033d9..5953517ec059 100644 --- a/net/sched/act_mirred.c +++ b/net/sched/act_mirred.c @@ -1,5 +1,5 @@ /* - * net/sched/mirred.c packet mirroring and redirect actions + * net/sched/act_mirred.c packet mirroring and redirect actions * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 5f9bcb2e080b..59649d588d79 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -1,5 +1,5 @@ /* - * net/sched/pedit.c Generic packet editor + * net/sched/act_pedit.c Generic packet editor * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 69791ca77a05..9a1c42a43f92 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c @@ -1,5 +1,5 @@ /* - * net/sched/police.c Input police filter. + * net/sched/act_police.c Input police filter * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 992c2317ce88..6a8d9488613a 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -1,5 +1,5 @@ /* - * net/sched/simp.c Simple example of an action + * net/sched/act_simple.c Simple example of an action * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c new file mode 100644 index 000000000000..d735ecf0b1a7 --- /dev/null +++ b/net/sched/act_vlan.c @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/skbuff.h> +#include <linux/rtnetlink.h> +#include <linux/if_vlan.h> +#include <net/netlink.h> +#include <net/pkt_sched.h> + +#include <linux/tc_act/tc_vlan.h> +#include <net/tc_act/tc_vlan.h> + +#define VLAN_TAB_MASK 15 + +static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a, + struct tcf_result *res) +{ + struct tcf_vlan *v = a->priv; + int action; + int err; + + spin_lock(&v->tcf_lock); + v->tcf_tm.lastuse = jiffies; + bstats_update(&v->tcf_bstats, skb); + action = v->tcf_action; + + switch (v->tcfv_action) { + case TCA_VLAN_ACT_POP: + err = skb_vlan_pop(skb); + if (err) + goto drop; + break; + case TCA_VLAN_ACT_PUSH: + err = skb_vlan_push(skb, v->tcfv_push_proto, v->tcfv_push_vid); + if (err) + goto drop; + break; + default: + BUG(); + } + + goto unlock; + +drop: + action = TC_ACT_SHOT; + v->tcf_qstats.drops++; +unlock: + spin_unlock(&v->tcf_lock); + return action; +} + +static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = { + [TCA_VLAN_PARMS] = { .len = sizeof(struct tc_vlan) }, + [TCA_VLAN_PUSH_VLAN_ID] = { .type = NLA_U16 }, + [TCA_VLAN_PUSH_VLAN_PROTOCOL] = { .type = NLA_U16 }, +}; + +static int tcf_vlan_init(struct net *net, struct nlattr *nla, + struct nlattr *est, struct tc_action *a, + int ovr, int bind) +{ + struct nlattr *tb[TCA_VLAN_MAX + 1]; + struct tc_vlan *parm; + struct tcf_vlan *v; + int action; + __be16 push_vid = 0; + __be16 push_proto = 0; + int ret = 0; + int err; + + if (!nla) + return -EINVAL; + + err = nla_parse_nested(tb, TCA_VLAN_MAX, nla, vlan_policy); + if (err < 0) + return err; + + if (!tb[TCA_VLAN_PARMS]) + return -EINVAL; + parm = nla_data(tb[TCA_VLAN_PARMS]); + switch (parm->v_action) { + case TCA_VLAN_ACT_POP: + break; + case TCA_VLAN_ACT_PUSH: + if (!tb[TCA_VLAN_PUSH_VLAN_ID]) + return -EINVAL; + push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]); + if (push_vid >= VLAN_VID_MASK) + return -ERANGE; + + if (tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]) { + push_proto = nla_get_be16(tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]); + switch (push_proto) { + case htons(ETH_P_8021Q): + case htons(ETH_P_8021AD): + break; + default: + return -EPROTONOSUPPORT; + } + } else { + push_proto = htons(ETH_P_8021Q); + } + break; + default: + return -EINVAL; + } + action = parm->v_action; + + if (!tcf_hash_check(parm->index, a, bind)) { + ret = tcf_hash_create(parm->index, est, a, sizeof(*v), bind); + if (ret) + return ret; + + ret = ACT_P_CREATED; + } else { + if (bind) + return 0; + tcf_hash_release(a, bind); + if (!ovr) + return -EEXIST; + } + + v = to_vlan(a); + + spin_lock_bh(&v->tcf_lock); + + v->tcfv_action = action; + v->tcfv_push_vid = push_vid; + v->tcfv_push_proto = push_proto; + + v->tcf_action = parm->action; + + spin_unlock_bh(&v->tcf_lock); + + if (ret == ACT_P_CREATED) + tcf_hash_insert(a); + return ret; +} + +static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a, + int bind, int ref) +{ + unsigned char *b = skb_tail_pointer(skb); + struct tcf_vlan *v = a->priv; + struct tc_vlan opt = { + .index = v->tcf_index, + .refcnt = v->tcf_refcnt - ref, + .bindcnt = v->tcf_bindcnt - bind, + .action = v->tcf_action, + .v_action = v->tcfv_action, + }; + struct tcf_t t; + + if (nla_put(skb, TCA_VLAN_PARMS, sizeof(opt), &opt)) + goto nla_put_failure; + + if (v->tcfv_action == TCA_VLAN_ACT_PUSH && + (nla_put_u16(skb, TCA_VLAN_PUSH_VLAN_ID, v->tcfv_push_vid) || + nla_put_be16(skb, TCA_VLAN_PUSH_VLAN_PROTOCOL, v->tcfv_push_proto))) + goto nla_put_failure; + + 
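tcf_vlan() above delegates the actual header rewrite to skb_vlan_pop() and skb_vlan_push(). A small sketch of how the two helpers compose, using the same signatures seen in the action; the wrapper name and the 802.1Q choice are assumptions for illustration:

/* Sketch: re-tagging with the helpers the action is built on. */
#include <linux/if_vlan.h>
#include <linux/skbuff.h>

static int example_retag(struct sk_buff *skb, u16 new_vid)
{
	int err;

	err = skb_vlan_pop(skb);	/* remove the current tag, if any */
	if (err)
		return err;

	/* push an 802.1Q tag carrying the new VID */
	return skb_vlan_push(skb, htons(ETH_P_8021Q), new_vid);
}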
t.install = jiffies_to_clock_t(jiffies - v->tcf_tm.install); + t.lastuse = jiffies_to_clock_t(jiffies - v->tcf_tm.lastuse); + t.expires = jiffies_to_clock_t(v->tcf_tm.expires); + if (nla_put(skb, TCA_VLAN_TM, sizeof(t), &t)) + goto nla_put_failure; + return skb->len; + +nla_put_failure: + nlmsg_trim(skb, b); + return -1; +} + +static struct tc_action_ops act_vlan_ops = { + .kind = "vlan", + .type = TCA_ACT_VLAN, + .owner = THIS_MODULE, + .act = tcf_vlan, + .dump = tcf_vlan_dump, + .init = tcf_vlan_init, +}; + +static int __init vlan_init_module(void) +{ + return tcf_register_action(&act_vlan_ops, VLAN_TAB_MASK); +} + +static void __exit vlan_cleanup_module(void) +{ + tcf_unregister_action(&act_vlan_ops); +} + +module_init(vlan_init_module); +module_exit(vlan_cleanup_module); + +MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>"); +MODULE_DESCRIPTION("vlan manipulation actions"); +MODULE_LICENSE("GPL v2"); diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 2cf61b3e633c..76f402e05bd6 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -947,7 +947,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue, if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) { if (qdisc_is_percpu_stats(sch)) { sch->cpu_bstats = - alloc_percpu(struct gnet_stats_basic_cpu); + netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu); if (!sch->cpu_bstats) goto err_out4; diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index b34331967e02..179f1c8c0d8b 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -139,33 +139,20 @@ struct netem_sched_data { /* Time stamp put into socket buffer control block * Only valid when skbs are in our internal t(ime)fifo queue. + * + * As skb->rbnode uses the same storage as skb->next, skb->prev and skb->tstamp, + * and skb->next & skb->prev are scratch space for a qdisc, + * we save skb->tstamp value in skb->cb[] before destroying it. */ struct netem_skb_cb { psched_time_t time_to_send; ktime_t tstamp_save; }; -/* Because space in skb->cb[] is tight, netem overloads skb->next/prev/tstamp - * to hold a rb_node structure. - * - * If struct sk_buff layout is changed, the following checks will complain.
- */ -static struct rb_node *netem_rb_node(struct sk_buff *skb) -{ - BUILD_BUG_ON(offsetof(struct sk_buff, next) != 0); - BUILD_BUG_ON(offsetof(struct sk_buff, prev) != - offsetof(struct sk_buff, next) + sizeof(skb->next)); - BUILD_BUG_ON(offsetof(struct sk_buff, tstamp) != - offsetof(struct sk_buff, prev) + sizeof(skb->prev)); - BUILD_BUG_ON(sizeof(struct rb_node) > sizeof(skb->next) + - sizeof(skb->prev) + - sizeof(skb->tstamp)); - return (struct rb_node *)&skb->next; -} static struct sk_buff *netem_rb_to_skb(struct rb_node *rb) { - return (struct sk_buff *)rb; + return container_of(rb, struct sk_buff, rbnode); } static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb) @@ -403,8 +390,8 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch) else p = &parent->rb_left; } - rb_link_node(netem_rb_node(nskb), parent, p); - rb_insert_color(netem_rb_node(nskb), &q->t_root); + rb_link_node(&nskb->rbnode, parent, p); + rb_insert_color(&nskb->rbnode, &q->t_root); sch->q.qlen++; } diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c index 33d7a98a7a97..b783a446d884 100644 --- a/net/sched/sch_pie.c +++ b/net/sched/sch_pie.c @@ -445,7 +445,6 @@ static int pie_init(struct Qdisc *sch, struct nlattr *opt) sch->limit = q->params.limit; setup_timer(&q->adapt_timer, pie_timer, (unsigned long)sch); - mod_timer(&q->adapt_timer, jiffies + HZ / 2); if (opt) { int err = pie_change(sch, opt); @@ -454,6 +453,7 @@ static int pie_init(struct Qdisc *sch, struct nlattr *opt) return err; } + mod_timer(&q->adapt_timer, jiffies + HZ / 2); return 0; } diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 0e8529113dc5..fb7976aee61c 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -862,8 +862,6 @@ int sctp_auth_set_key(struct sctp_endpoint *ep, list_add(&cur_key->key_list, sh_keys); cur_key->key = key; - sctp_auth_key_hold(key); - return 0; nomem: if (!replace) diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 34229ee7f379..0697eda5aed8 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -417,7 +417,7 @@ static void *sctp_remaddr_seq_start(struct seq_file *seq, loff_t *pos) if (*pos == 0) seq_printf(seq, "ADDR ASSOC_ID HB_ACT RTO MAX_PATH_RTX " - "REM_ADDR_RTX START\n"); + "REM_ADDR_RTX START STATE\n"); return (void *)pos; } @@ -490,14 +490,20 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) * Note: We don't have a way to tally this at the moment * so lets just leave it as zero for the moment */ - seq_printf(seq, "0 "); + seq_puts(seq, "0 "); /* * remote address start time (START). This is also not * currently implemented, but we can record it with a * jiffies marker in a subsequent patch */ - seq_printf(seq, "0"); + seq_puts(seq, "0 "); + + /* + * The current state of this destination. I.e. + * SCTP_ACTIVE, SCTP_INACTIVE, ... 
+ */ + seq_printf(seq, "%d", tsp->state); seq_printf(seq, "\n"); } diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index ab734be8cb20..9f32741abb1c 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -2609,6 +2609,9 @@ do_addr_param: addr_param = param.v + sizeof(sctp_addip_param_t); af = sctp_get_af_specific(param_type2af(param.p->type)); + if (af == NULL) + break; + af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0); diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 634a2abb5f3a..85e0b653edd7 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -162,7 +162,7 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk) chunk->skb->destructor = sctp_wfree; /* Save the chunk pointer in skb for sctp_wfree to use later. */ - *((struct sctp_chunk **)(chunk->skb->cb)) = chunk; + skb_shinfo(chunk->skb)->destructor_arg = chunk; asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) + sizeof(struct sk_buff) + @@ -2095,7 +2095,7 @@ static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, if (copied > len) copied = len; - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + err = skb_copy_datagram_msg(skb, 0, msg, copied); event = sctp_skb2event(skb); @@ -6870,14 +6870,10 @@ static void sctp_wake_up_waiters(struct sock *sk, */ static void sctp_wfree(struct sk_buff *skb) { - struct sctp_association *asoc; - struct sctp_chunk *chunk; - struct sock *sk; + struct sctp_chunk *chunk = skb_shinfo(skb)->destructor_arg; + struct sctp_association *asoc = chunk->asoc; + struct sock *sk = asoc->base.sk; - /* Get the saved chunk pointer. */ - chunk = *((struct sctp_chunk **)(skb->cb)); - asoc = chunk->asoc; - sk = asoc->base.sk; asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) + sizeof(struct sk_buff) + sizeof(struct sctp_chunk); diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c index d49dc2ed30ad..ce469d648ffb 100644 --- a/net/sctp/ulpqueue.c +++ b/net/sctp/ulpqueue.c @@ -205,9 +205,10 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN)) goto out_free; - if (!sctp_ulpevent_is_notification(event)) + if (!sctp_ulpevent_is_notification(event)) { sk_mark_napi_id(sk, skb); - + sk_incoming_cpu_update(sk); + } /* Check if the user wishes to receive this event. 
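The sctp_set_owner_w()/sctp_wfree() change above is an instance of a general pattern: stash a backpointer in skb_shinfo()->destructor_arg at charge time and recover it in the destructor, which frees up skb->cb[] for other users. The pattern in isolation, with illustrative names and an invented context struct:

/* Illustrative stash-and-recover pattern behind the sctp_wfree() change. */
#include <linux/atomic.h>
#include <linux/skbuff.h>

struct example_ctx {
	atomic_t outstanding;
};

static void example_wfree(struct sk_buff *skb)
{
	struct example_ctx *ctx = skb_shinfo(skb)->destructor_arg;

	atomic_dec(&ctx->outstanding);	/* uncharge on free */
}

static void example_charge(struct sk_buff *skb, struct example_ctx *ctx)
{
	skb_shinfo(skb)->destructor_arg = ctx;
	skb->destructor = example_wfree;
	atomic_inc(&ctx->outstanding);
}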
*/ if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe)) goto out_free; diff --git a/net/socket.c b/net/socket.c index fe20c319a0bb..ee3ee39eefa5 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1988,13 +1988,26 @@ struct used_address { unsigned int name_len; }; -static int copy_msghdr_from_user(struct msghdr *kmsg, - struct msghdr __user *umsg) +static ssize_t copy_msghdr_from_user(struct msghdr *kmsg, + struct user_msghdr __user *umsg, + struct sockaddr __user **save_addr, + struct iovec **iov) { - if (copy_from_user(kmsg, umsg, sizeof(struct msghdr))) + struct sockaddr __user *uaddr; + struct iovec __user *uiov; + ssize_t err; + + if (!access_ok(VERIFY_READ, umsg, sizeof(*umsg)) || + __get_user(uaddr, &umsg->msg_name) || + __get_user(kmsg->msg_namelen, &umsg->msg_namelen) || + __get_user(uiov, &umsg->msg_iov) || + __get_user(kmsg->msg_iovlen, &umsg->msg_iovlen) || + __get_user(kmsg->msg_control, &umsg->msg_control) || + __get_user(kmsg->msg_controllen, &umsg->msg_controllen) || + __get_user(kmsg->msg_flags, &umsg->msg_flags)) return -EFAULT; - if (kmsg->msg_name == NULL) + if (!uaddr) kmsg->msg_namelen = 0; if (kmsg->msg_namelen < 0) @@ -2002,10 +2015,34 @@ static int copy_msghdr_from_user(struct msghdr *kmsg, if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) kmsg->msg_namelen = sizeof(struct sockaddr_storage); - return 0; + + if (save_addr) + *save_addr = uaddr; + + if (uaddr && kmsg->msg_namelen) { + if (!save_addr) { + err = move_addr_to_kernel(uaddr, kmsg->msg_namelen, + kmsg->msg_name); + if (err < 0) + return err; + } + } else { + kmsg->msg_name = NULL; + kmsg->msg_namelen = 0; + } + + if (kmsg->msg_iovlen > UIO_MAXIOV) + return -EMSGSIZE; + + err = rw_copy_check_uvector(save_addr ? READ : WRITE, + uiov, kmsg->msg_iovlen, + UIO_FASTIOV, *iov, iov); + if (err >= 0) + kmsg->msg_iov = *iov; + return err; } -static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg, +static int ___sys_sendmsg(struct socket *sock, struct user_msghdr __user *msg, struct msghdr *msg_sys, unsigned int flags, struct used_address *used_address) { @@ -2017,34 +2054,15 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg, __attribute__ ((aligned(sizeof(__kernel_size_t)))); /* 20 is size of ipv6_pktinfo */ unsigned char *ctl_buf = ctl; - int err, ctl_len, total_len; - - err = -EFAULT; - if (MSG_CMSG_COMPAT & flags) { - if (get_compat_msghdr(msg_sys, msg_compat)) - return -EFAULT; - } else { - err = copy_msghdr_from_user(msg_sys, msg); - if (err) - return err; - } + int ctl_len, total_len; + ssize_t err; - if (msg_sys->msg_iovlen > UIO_FASTIOV) { - err = -EMSGSIZE; - if (msg_sys->msg_iovlen > UIO_MAXIOV) - goto out; - err = -ENOMEM; - iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec), - GFP_KERNEL); - if (!iov) - goto out; - } + msg_sys->msg_name = &address; - /* This will also move the address data into kernel space */ - if (MSG_CMSG_COMPAT & flags) { - err = verify_compat_iovec(msg_sys, iov, &address, VERIFY_READ); - } else - err = verify_iovec(msg_sys, iov, &address, VERIFY_READ); + if (MSG_CMSG_COMPAT & flags) + err = get_compat_msghdr(msg_sys, msg_compat, NULL, &iov); + else + err = copy_msghdr_from_user(msg_sys, msg, NULL, &iov); if (err < 0) goto out_freeiov; total_len = err; @@ -2115,7 +2133,6 @@ out_freectl: out_freeiov: if (iov != iovstack) kfree(iov); -out: return err; } @@ -2123,7 +2140,7 @@ out: * BSD sendmsg interface */ -long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags) +long __sys_sendmsg(int fd, struct user_msghdr 
__user *msg, unsigned flags) { int fput_needed, err; struct msghdr msg_sys; @@ -2140,7 +2157,7 @@ out: return err; } -SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags) +SYSCALL_DEFINE3(sendmsg, int, fd, struct user_msghdr __user *, msg, unsigned int, flags) { if (flags & MSG_CMSG_COMPAT) return -EINVAL; @@ -2177,7 +2194,7 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, while (datagrams < vlen) { if (MSG_CMSG_COMPAT & flags) { - err = ___sys_sendmsg(sock, (struct msghdr __user *)compat_entry, + err = ___sys_sendmsg(sock, (struct user_msghdr __user *)compat_entry, &msg_sys, flags, &used_address); if (err < 0) break; @@ -2185,7 +2202,7 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, ++compat_entry; } else { err = ___sys_sendmsg(sock, - (struct msghdr __user *)entry, + (struct user_msghdr __user *)entry, &msg_sys, flags, &used_address); if (err < 0) break; @@ -2215,7 +2232,7 @@ SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg, return __sys_sendmmsg(fd, mmsg, vlen, flags); } -static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg, +static int ___sys_recvmsg(struct socket *sock, struct user_msghdr __user *msg, struct msghdr *msg_sys, unsigned int flags, int nosec) { struct compat_msghdr __user *msg_compat = @@ -2223,44 +2240,22 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg, struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; unsigned long cmsg_ptr; - int err, total_len, len; + int total_len, len; + ssize_t err; /* kernel mode address */ struct sockaddr_storage addr; /* user mode address pointers */ struct sockaddr __user *uaddr; - int __user *uaddr_len; + int __user *uaddr_len = COMPAT_NAMELEN(msg); - if (MSG_CMSG_COMPAT & flags) { - if (get_compat_msghdr(msg_sys, msg_compat)) - return -EFAULT; - } else { - err = copy_msghdr_from_user(msg_sys, msg); - if (err) - return err; - } - - if (msg_sys->msg_iovlen > UIO_FASTIOV) { - err = -EMSGSIZE; - if (msg_sys->msg_iovlen > UIO_MAXIOV) - goto out; - err = -ENOMEM; - iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec), - GFP_KERNEL); - if (!iov) - goto out; - } + msg_sys->msg_name = &addr; - /* Save the user-mode address (verify_iovec will change the - * kernel msghdr to use the kernel address space) - */ - uaddr = (__force void __user *)msg_sys->msg_name; - uaddr_len = COMPAT_NAMELEN(msg); if (MSG_CMSG_COMPAT & flags) - err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE); + err = get_compat_msghdr(msg_sys, msg_compat, &uaddr, &iov); else - err = verify_iovec(msg_sys, iov, &addr, VERIFY_WRITE); + err = copy_msghdr_from_user(msg_sys, msg, &uaddr, &iov); if (err < 0) goto out_freeiov; total_len = err; @@ -2303,7 +2298,6 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg, out_freeiov: if (iov != iovstack) kfree(iov); -out: return err; } @@ -2311,7 +2305,7 @@ out: * BSD recvmsg interface */ -long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags) +long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags) { int fput_needed, err; struct msghdr msg_sys; @@ -2328,7 +2322,7 @@ out: return err; } -SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, +SYSCALL_DEFINE3(recvmsg, int, fd, struct user_msghdr __user *, msg, unsigned int, flags) { if (flags & MSG_CMSG_COMPAT) @@ -2373,7 +2367,7 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, * No need to ask LSM for more than the first 
datagram. */ if (MSG_CMSG_COMPAT & flags) { - err = ___sys_recvmsg(sock, (struct msghdr __user *)compat_entry, + err = ___sys_recvmsg(sock, (struct user_msghdr __user *)compat_entry, &msg_sys, flags & ~MSG_WAITFORONE, datagrams); if (err < 0) @@ -2382,7 +2376,7 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, ++compat_entry; } else { err = ___sys_recvmsg(sock, - (struct msghdr __user *)entry, + (struct user_msghdr __user *)entry, &msg_sys, flags & ~MSG_WAITFORONE, datagrams); if (err < 0) @@ -2571,13 +2565,13 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args) (int __user *)a[4]); break; case SYS_SENDMSG: - err = sys_sendmsg(a0, (struct msghdr __user *)a1, a[2]); + err = sys_sendmsg(a0, (struct user_msghdr __user *)a1, a[2]); break; case SYS_SENDMMSG: err = sys_sendmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3]); break; case SYS_RECVMSG: - err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]); + err = sys_recvmsg(a0, (struct user_msghdr __user *)a1, a[2]); break; case SYS_RECVMMSG: err = sys_recvmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3], diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index b8670bf262e2..dcf3589e3cc5 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -767,6 +767,117 @@ void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action) tipc_bclink_unlock(); } +int __tipc_nl_add_bc_link_stat(struct sk_buff *skb, struct tipc_stats *stats) +{ + int i; + struct nlattr *nest; + + struct nla_map { + __u32 key; + __u32 val; + }; + + struct nla_map map[] = { + {TIPC_NLA_STATS_RX_INFO, stats->recv_info}, + {TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments}, + {TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented}, + {TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles}, + {TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled}, + {TIPC_NLA_STATS_TX_INFO, stats->sent_info}, + {TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments}, + {TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented}, + {TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles}, + {TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled}, + {TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks}, + {TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv}, + {TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks}, + {TIPC_NLA_STATS_TX_ACKS, stats->sent_acks}, + {TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted}, + {TIPC_NLA_STATS_DUPLICATES, stats->duplicates}, + {TIPC_NLA_STATS_LINK_CONGS, stats->link_congs}, + {TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz}, + {TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ? 
+ (stats->accu_queue_sz / stats->queue_sz_counts) : 0} + }; + + nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS); + if (!nest) + return -EMSGSIZE; + + for (i = 0; i < ARRAY_SIZE(map); i++) + if (nla_put_u32(skb, map[i].key, map[i].val)) + goto msg_full; + + nla_nest_end(skb, nest); + + return 0; +msg_full: + nla_nest_cancel(skb, nest); + + return -EMSGSIZE; +} + +int tipc_nl_add_bc_link(struct tipc_nl_msg *msg) +{ + int err; + void *hdr; + struct nlattr *attrs; + struct nlattr *prop; + + if (!bcl) + return 0; + + tipc_bclink_lock(); + + hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family, + NLM_F_MULTI, TIPC_NL_LINK_GET); + if (!hdr) { + tipc_bclink_unlock(); + return -EMSGSIZE; + } + + attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK); + if (!attrs) + goto msg_full; + + /* The broadcast link is always up */ + if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP)) + goto attr_msg_full; + + if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST)) + goto attr_msg_full; + if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name)) + goto attr_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->next_in_no)) + goto attr_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->next_out_no)) + goto attr_msg_full; + + prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP); + if (!prop) + goto attr_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->queue_limit[0])) + goto prop_msg_full; + nla_nest_end(msg->skb, prop); + + err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats); + if (err) + goto attr_msg_full; + + tipc_bclink_unlock(); + nla_nest_end(msg->skb, attrs); + genlmsg_end(msg->skb, hdr); + + return 0; + +prop_msg_full: + nla_nest_cancel(msg->skb, prop); +attr_msg_full: + nla_nest_cancel(msg->skb, attrs); +msg_full: + tipc_bclink_unlock(); + genlmsg_cancel(msg->skb, hdr); + + return -EMSGSIZE; +} int tipc_bclink_stats(char *buf, const u32 buf_size) { diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h index e7b0f85a82bc..443de084d3e8 100644 --- a/net/tipc/bcast.h +++ b/net/tipc/bcast.h @@ -37,6 +37,8 @@ #ifndef _TIPC_BCAST_H #define _TIPC_BCAST_H +#include "netlink.h" + #define MAX_NODES 4096 #define WSIZE 32 #define TIPC_BCLINK_RESET 1 @@ -100,4 +102,6 @@ void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action); uint tipc_bclink_get_mtu(void); int tipc_bclink_xmit(struct sk_buff *buf); void tipc_bclink_wakeup_users(void); +int tipc_nl_add_bc_link(struct tipc_nl_msg *msg); + #endif diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 264474394f9f..5f6f32369c16 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -1,7 +1,7 @@ /* * net/tipc/bearer.c: TIPC bearer code * - * Copyright (c) 1996-2006, 2013, Ericsson AB + * Copyright (c) 1996-2006, 2013-2014, Ericsson AB * Copyright (c) 2004-2006, 2010-2013, Wind River Systems * All rights reserved.
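__tipc_nl_add_bc_link_stat() above replaces what would be nineteen separate nla_put_u32() calls with a key/value table and one loop. The same pattern reduced to essentials; the attribute selection and function name are illustrative:

/* Illustrative reduction of the table-driven dump pattern used above. */
static int example_put_stats(struct sk_buff *skb, u32 rx, u32 tx)
{
	struct nla_map {
		u32 key;
		u32 val;
	} map[] = {
		{ TIPC_NLA_STATS_RX_INFO, rx },
		{ TIPC_NLA_STATS_TX_INFO, tx },
	};
	struct nlattr *nest;
	int i;

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val)) {
			nla_nest_cancel(skb, nest);
			return -EMSGSIZE;
		}

	nla_nest_end(skb, nest);
	return 0;
}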
* @@ -37,6 +37,7 @@ #include "core.h" #include "config.h" #include "bearer.h" +#include "link.h" #include "discover.h" #define MAX_ADDR_STR 60 @@ -49,6 +50,23 @@ static struct tipc_media * const media_info_array[] = { NULL }; +static const struct nla_policy +tipc_nl_bearer_policy[TIPC_NLA_BEARER_MAX + 1] = { + [TIPC_NLA_BEARER_UNSPEC] = { .type = NLA_UNSPEC }, + [TIPC_NLA_BEARER_NAME] = { + .type = NLA_STRING, + .len = TIPC_MAX_BEARER_NAME + }, + [TIPC_NLA_BEARER_PROP] = { .type = NLA_NESTED }, + [TIPC_NLA_BEARER_DOMAIN] = { .type = NLA_U32 } +}; + +static const struct nla_policy tipc_nl_media_policy[TIPC_NLA_MEDIA_MAX + 1] = { + [TIPC_NLA_MEDIA_UNSPEC] = { .type = NLA_UNSPEC }, + [TIPC_NLA_MEDIA_NAME] = { .type = NLA_STRING }, + [TIPC_NLA_MEDIA_PROP] = { .type = NLA_NESTED } +}; + struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1]; static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down); @@ -627,3 +645,428 @@ void tipc_bearer_stop(void) } } } + +/* Caller should hold rtnl_lock to protect the bearer */ +int __tipc_nl_add_bearer(struct tipc_nl_msg *msg, struct tipc_bearer *bearer) +{ + void *hdr; + struct nlattr *attrs; + struct nlattr *prop; + + hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family, + NLM_F_MULTI, TIPC_NL_BEARER_GET); + if (!hdr) + return -EMSGSIZE; + + attrs = nla_nest_start(msg->skb, TIPC_NLA_BEARER); + if (!attrs) + goto msg_full; + + if (nla_put_string(msg->skb, TIPC_NLA_BEARER_NAME, bearer->name)) + goto attr_msg_full; + + prop = nla_nest_start(msg->skb, TIPC_NLA_BEARER_PROP); + if (!prop) + goto prop_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, bearer->priority)) + goto prop_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, bearer->tolerance)) + goto prop_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bearer->window)) + goto prop_msg_full; + + nla_nest_end(msg->skb, prop); + nla_nest_end(msg->skb, attrs); + genlmsg_end(msg->skb, hdr); + + return 0; + +prop_msg_full: + nla_nest_cancel(msg->skb, prop); +attr_msg_full: + nla_nest_cancel(msg->skb, attrs); +msg_full: + genlmsg_cancel(msg->skb, hdr); + + return -EMSGSIZE; +} + +int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + int err; + int i = cb->args[0]; + struct tipc_bearer *bearer; + struct tipc_nl_msg msg; + + if (i == MAX_BEARERS) + return 0; + + msg.skb = skb; + msg.portid = NETLINK_CB(cb->skb).portid; + msg.seq = cb->nlh->nlmsg_seq; + + rtnl_lock(); + for (i = 0; i < MAX_BEARERS; i++) { + bearer = rtnl_dereference(bearer_list[i]); + if (!bearer) + continue; + + err = __tipc_nl_add_bearer(&msg, bearer); + if (err) + break; + } + rtnl_unlock(); + + cb->args[0] = i; + return skb->len; +} + +int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info) +{ + int err; + char *name; + struct sk_buff *rep; + struct tipc_bearer *bearer; + struct tipc_nl_msg msg; + struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1]; + + if (!info->attrs[TIPC_NLA_BEARER]) + return -EINVAL; + + err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX, + info->attrs[TIPC_NLA_BEARER], + tipc_nl_bearer_policy); + if (err) + return err; + + if (!attrs[TIPC_NLA_BEARER_NAME]) + return -EINVAL; + name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); + + rep = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!rep) + return -ENOMEM; + + msg.skb = rep; + msg.portid = info->snd_portid; + msg.seq = info->snd_seq; + + rtnl_lock(); + bearer = tipc_bearer_find(name); + if (!bearer) { + err = -EINVAL; + goto err_out; + } + + err = __tipc_nl_add_bearer(&msg, bearer); + if (err) 
+ goto err_out; + rtnl_unlock(); + + return genlmsg_reply(rep, info); +err_out: + rtnl_unlock(); + nlmsg_free(rep); + + return err; +} + +int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info) +{ + int err; + char *name; + struct tipc_bearer *bearer; + struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1]; + + if (!info->attrs[TIPC_NLA_BEARER]) + return -EINVAL; + + err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX, + info->attrs[TIPC_NLA_BEARER], + tipc_nl_bearer_policy); + if (err) + return err; + + if (!attrs[TIPC_NLA_BEARER_NAME]) + return -EINVAL; + + name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); + + rtnl_lock(); + bearer = tipc_bearer_find(name); + if (!bearer) { + rtnl_unlock(); + return -EINVAL; + } + + bearer_disable(bearer, false); + rtnl_unlock(); + + return 0; +} + +int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info) +{ + int err; + char *bearer; + struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1]; + u32 domain; + u32 prio; + + prio = TIPC_MEDIA_LINK_PRI; + domain = tipc_own_addr & TIPC_CLUSTER_MASK; + + if (!info->attrs[TIPC_NLA_BEARER]) + return -EINVAL; + + err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX, + info->attrs[TIPC_NLA_BEARER], + tipc_nl_bearer_policy); + if (err) + return err; + + if (!attrs[TIPC_NLA_BEARER_NAME]) + return -EINVAL; + + bearer = nla_data(attrs[TIPC_NLA_BEARER_NAME]); + + if (attrs[TIPC_NLA_BEARER_DOMAIN]) + domain = nla_get_u32(attrs[TIPC_NLA_BEARER_DOMAIN]); + + if (attrs[TIPC_NLA_BEARER_PROP]) { + struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; + + err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_BEARER_PROP], + props); + if (err) + return err; + + if (props[TIPC_NLA_PROP_PRIO]) + prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); + } + + rtnl_lock(); + err = tipc_enable_bearer(bearer, domain, prio); + if (err) { + rtnl_unlock(); + return err; + } + rtnl_unlock(); + + return 0; +} + +int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info) +{ + int err; + char *name; + struct tipc_bearer *b; + struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1]; + + if (!info->attrs[TIPC_NLA_BEARER]) + return -EINVAL; + + err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX, + info->attrs[TIPC_NLA_BEARER], + tipc_nl_bearer_policy); + if (err) + return err; + + if (!attrs[TIPC_NLA_BEARER_NAME]) + return -EINVAL; + name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); + + rtnl_lock(); + b = tipc_bearer_find(name); + if (!b) { + rtnl_unlock(); + return -EINVAL; + } + + if (attrs[TIPC_NLA_BEARER_PROP]) { + struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; + + err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_BEARER_PROP], + props); + if (err) { + rtnl_unlock(); + return err; + } + + if (props[TIPC_NLA_PROP_TOL]) + b->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]); + if (props[TIPC_NLA_PROP_PRIO]) + b->priority = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); + if (props[TIPC_NLA_PROP_WIN]) + b->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]); + } + rtnl_unlock(); + + return 0; +} + +int __tipc_nl_add_media(struct tipc_nl_msg *msg, struct tipc_media *media) +{ + void *hdr; + struct nlattr *attrs; + struct nlattr *prop; + + hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family, + NLM_F_MULTI, TIPC_NL_MEDIA_GET); + if (!hdr) + return -EMSGSIZE; + + attrs = nla_nest_start(msg->skb, TIPC_NLA_MEDIA); + if (!attrs) + goto msg_full; + + if (nla_put_string(msg->skb, TIPC_NLA_MEDIA_NAME, media->name)) + goto attr_msg_full; + + prop = nla_nest_start(msg->skb, TIPC_NLA_MEDIA_PROP); + if (!prop) + goto prop_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, 
media->priority)) + goto prop_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, media->tolerance)) + goto prop_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, media->window)) + goto prop_msg_full; + + nla_nest_end(msg->skb, prop); + nla_nest_end(msg->skb, attrs); + genlmsg_end(msg->skb, hdr); + + return 0; + +prop_msg_full: + nla_nest_cancel(msg->skb, prop); +attr_msg_full: + nla_nest_cancel(msg->skb, attrs); +msg_full: + genlmsg_cancel(msg->skb, hdr); + + return -EMSGSIZE; +} + +int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + int err; + int i = cb->args[0]; + struct tipc_nl_msg msg; + + if (i == MAX_MEDIA) + return 0; + + msg.skb = skb; + msg.portid = NETLINK_CB(cb->skb).portid; + msg.seq = cb->nlh->nlmsg_seq; + + rtnl_lock(); + for (; media_info_array[i] != NULL; i++) { + err = __tipc_nl_add_media(&msg, media_info_array[i]); + if (err) + break; + } + rtnl_unlock(); + + cb->args[0] = i; + return skb->len; +} + +int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info) +{ + int err; + char *name; + struct tipc_nl_msg msg; + struct tipc_media *media; + struct sk_buff *rep; + struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1]; + + if (!info->attrs[TIPC_NLA_MEDIA]) + return -EINVAL; + + err = nla_parse_nested(attrs, TIPC_NLA_MEDIA_MAX, + info->attrs[TIPC_NLA_MEDIA], + tipc_nl_media_policy); + if (err) + return err; + + if (!attrs[TIPC_NLA_MEDIA_NAME]) + return -EINVAL; + name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]); + + rep = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!rep) + return -ENOMEM; + + msg.skb = rep; + msg.portid = info->snd_portid; + msg.seq = info->snd_seq; + + rtnl_lock(); + media = tipc_media_find(name); + if (!media) { + err = -EINVAL; + goto err_out; + } + + err = __tipc_nl_add_media(&msg, media); + if (err) + goto err_out; + rtnl_unlock(); + + return genlmsg_reply(rep, info); +err_out: + rtnl_unlock(); + nlmsg_free(rep); + + return err; +} + +int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info) +{ + int err; + char *name; + struct tipc_media *m; + struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1]; + + if (!info->attrs[TIPC_NLA_MEDIA]) + return -EINVAL; + + err = nla_parse_nested(attrs, TIPC_NLA_MEDIA_MAX, + info->attrs[TIPC_NLA_MEDIA], + tipc_nl_media_policy); + + if (!attrs[TIPC_NLA_MEDIA_NAME]) + return -EINVAL; + name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]); + + rtnl_lock(); + m = tipc_media_find(name); + if (!m) { + rtnl_unlock(); + return -EINVAL; + } + + if (attrs[TIPC_NLA_MEDIA_PROP]) { + struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; + + err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_MEDIA_PROP], + props); + if (err) { + rtnl_unlock(); + return err; + } + + if (props[TIPC_NLA_PROP_TOL]) + m->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]); + if (props[TIPC_NLA_PROP_PRIO]) + m->priority = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); + if (props[TIPC_NLA_PROP_WIN]) + m->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]); + } + rtnl_unlock(); + + return 0; +} diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h index 78fccc49de23..b1d905209e83 100644 --- a/net/tipc/bearer.h +++ b/net/tipc/bearer.h @@ -1,7 +1,7 @@ /* * net/tipc/bearer.h: Include file for TIPC bearer code * - * Copyright (c) 1996-2006, 2013, Ericsson AB + * Copyright (c) 1996-2006, 2013-2014, Ericsson AB * Copyright (c) 2005, 2010-2011, Wind River Systems * All rights reserved. 
* @@ -38,6 +38,8 @@ #define _TIPC_BEARER_H #include "bcast.h" +#include "netlink.h" +#include <net/genetlink.h> #define MAX_BEARERS 2 #define MAX_MEDIA 2 @@ -176,6 +178,16 @@ extern struct tipc_media eth_media_info; extern struct tipc_media ib_media_info; #endif +int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info); +int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info); +int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb); +int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info); +int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info); + +int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb); +int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info); +int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info); + int tipc_media_set_priority(const char *name, u32 new_value); int tipc_media_set_window(const char *name, u32 new_value); void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a); diff --git a/net/tipc/core.h b/net/tipc/core.h index f773b148722f..b578b10feefa 100644 --- a/net/tipc/core.h +++ b/net/tipc/core.h @@ -41,6 +41,7 @@ #include <linux/tipc.h> #include <linux/tipc_config.h> +#include <linux/tipc_netlink.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/errno.h> diff --git a/net/tipc/link.c b/net/tipc/link.c index 1db162aa64a5..629e8cf393bf 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -36,10 +36,12 @@ #include "core.h" #include "link.h" +#include "bcast.h" #include "socket.h" #include "name_distr.h" #include "discover.h" #include "config.h" +#include "netlink.h" #include <linux/pkt_sched.h> @@ -50,6 +52,30 @@ static const char *link_co_err = "Link changeover error, "; static const char *link_rst_msg = "Resetting link "; static const char *link_unk_evt = "Unknown link event "; +static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = { + [TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC }, + [TIPC_NLA_LINK_NAME] = { + .type = NLA_STRING, + .len = TIPC_MAX_LINK_NAME + }, + [TIPC_NLA_LINK_MTU] = { .type = NLA_U32 }, + [TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG }, + [TIPC_NLA_LINK_UP] = { .type = NLA_FLAG }, + [TIPC_NLA_LINK_ACTIVE] = { .type = NLA_FLAG }, + [TIPC_NLA_LINK_PROP] = { .type = NLA_NESTED }, + [TIPC_NLA_LINK_STATS] = { .type = NLA_NESTED }, + [TIPC_NLA_LINK_RX] = { .type = NLA_U32 }, + [TIPC_NLA_LINK_TX] = { .type = NLA_U32 } +}; + +/* Properties valid for media, bearer and link */ +static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = { + [TIPC_NLA_PROP_UNSPEC] = { .type = NLA_UNSPEC }, + [TIPC_NLA_PROP_PRIO] = { .type = NLA_U32 }, + [TIPC_NLA_PROP_TOL] = { .type = NLA_U32 }, + [TIPC_NLA_PROP_WIN] = { .type = NLA_U32 } +}; + /* * Out-of-range value for link session numbers */ @@ -224,9 +250,10 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, char addr_string[16]; u32 peer = n_ptr->addr; - if (n_ptr->link_cnt >= 2) { + if (n_ptr->link_cnt >= MAX_BEARERS) { tipc_addr_string_fill(addr_string, n_ptr->addr); - pr_err("Attempt to establish third link to %s\n", addr_string); + pr_err("Attempt to establish %uth link to %s.
Max %u allowed.\n", + n_ptr->link_cnt, addr_string, MAX_BEARERS); return NULL; } @@ -2375,3 +2402,433 @@ static void link_print(struct tipc_link *l_ptr, const char *str) else pr_cont("\n"); } + +/* Parse and validate nested (link) properties valid for media, bearer and link + */ +int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]) +{ + int err; + + err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop, + tipc_nl_prop_policy); + if (err) + return err; + + if (props[TIPC_NLA_PROP_PRIO]) { + u32 prio; + + prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); + if (prio > TIPC_MAX_LINK_PRI) + return -EINVAL; + } + + if (props[TIPC_NLA_PROP_TOL]) { + u32 tol; + + tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]); + if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL)) + return -EINVAL; + } + + if (props[TIPC_NLA_PROP_WIN]) { + u32 win; + + win = nla_get_u32(props[TIPC_NLA_PROP_WIN]); + if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN)) + return -EINVAL; + } + + return 0; +} + +int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info) +{ + int err; + int res = 0; + int bearer_id; + char *name; + struct tipc_link *link; + struct tipc_node *node; + struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1]; + + if (!info->attrs[TIPC_NLA_LINK]) + return -EINVAL; + + err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX, + info->attrs[TIPC_NLA_LINK], + tipc_nl_link_policy); + if (err) + return err; + + if (!attrs[TIPC_NLA_LINK_NAME]) + return -EINVAL; + + name = nla_data(attrs[TIPC_NLA_LINK_NAME]); + + node = tipc_link_find_owner(name, &bearer_id); + if (!node) + return -EINVAL; + + tipc_node_lock(node); + + link = node->links[bearer_id]; + if (!link) { + res = -EINVAL; + goto out; + } + + if (attrs[TIPC_NLA_LINK_PROP]) { + struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; + + err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], + props); + if (err) { + res = err; + goto out; + } + + if (props[TIPC_NLA_PROP_TOL]) { + u32 tol; + + tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]); + link_set_supervision_props(link, tol); + tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0, 0); + } + if (props[TIPC_NLA_PROP_PRIO]) { + u32 prio; + + prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); + link->priority = prio; + tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio, 0); + } + if (props[TIPC_NLA_PROP_WIN]) { + u32 win; + + win = nla_get_u32(props[TIPC_NLA_PROP_WIN]); + tipc_link_set_queue_limits(link, win); + } + } + +out: + tipc_node_unlock(node); + + return res; +} +int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s) +{ + int i; + struct nlattr *stats; + + struct nla_map { + u32 key; + u32 val; + }; + + struct nla_map map[] = { + {TIPC_NLA_STATS_RX_INFO, s->recv_info}, + {TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments}, + {TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented}, + {TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles}, + {TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled}, + {TIPC_NLA_STATS_TX_INFO, s->sent_info}, + {TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments}, + {TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented}, + {TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles}, + {TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled}, + {TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ? 
+ s->msg_length_counts : 1}, + {TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts}, + {TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total}, + {TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]}, + {TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]}, + {TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]}, + {TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]}, + {TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]}, + {TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]}, + {TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]}, + {TIPC_NLA_STATS_RX_STATES, s->recv_states}, + {TIPC_NLA_STATS_RX_PROBES, s->recv_probes}, + {TIPC_NLA_STATS_RX_NACKS, s->recv_nacks}, + {TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv}, + {TIPC_NLA_STATS_TX_STATES, s->sent_states}, + {TIPC_NLA_STATS_TX_PROBES, s->sent_probes}, + {TIPC_NLA_STATS_TX_NACKS, s->sent_nacks}, + {TIPC_NLA_STATS_TX_ACKS, s->sent_acks}, + {TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted}, + {TIPC_NLA_STATS_DUPLICATES, s->duplicates}, + {TIPC_NLA_STATS_LINK_CONGS, s->link_congs}, + {TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz}, + {TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ? + (s->accu_queue_sz / s->queue_sz_counts) : 0} + }; + + stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS); + if (!stats) + return -EMSGSIZE; + + for (i = 0; i < ARRAY_SIZE(map); i++) + if (nla_put_u32(skb, map[i].key, map[i].val)) + goto msg_full; + + nla_nest_end(skb, stats); + + return 0; +msg_full: + nla_nest_cancel(skb, stats); + + return -EMSGSIZE; +} + +/* Caller should hold appropriate locks to protect the link */ +int __tipc_nl_add_link(struct tipc_nl_msg *msg, struct tipc_link *link) +{ + int err; + void *hdr; + struct nlattr *attrs; + struct nlattr *prop; + + hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family, + NLM_F_MULTI, TIPC_NL_LINK_GET); + if (!hdr) + return -EMSGSIZE; + + attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK); + if (!attrs) + goto msg_full; + + if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name)) + goto attr_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, + tipc_cluster_mask(tipc_own_addr))) + goto attr_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt)) + goto attr_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no)) + goto attr_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->next_out_no)) + goto attr_msg_full; + + if (tipc_link_is_up(link)) + if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP)) + goto attr_msg_full; + if (tipc_link_is_active(link)) + if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE)) + goto attr_msg_full; + + prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP); + if (!prop) + goto attr_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority)) + goto prop_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance)) + goto prop_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, + link->queue_limit[TIPC_LOW_IMPORTANCE])) + goto prop_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority)) + goto prop_msg_full; + nla_nest_end(msg->skb, prop); + + err = __tipc_nl_add_stats(msg->skb, &link->stats); + if (err) + goto attr_msg_full; + + nla_nest_end(msg->skb, attrs); + genlmsg_end(msg->skb, hdr); + + return 0; + +prop_msg_full: + nla_nest_cancel(msg->skb, prop); +attr_msg_full: + nla_nest_cancel(msg->skb, attrs); +msg_full: + genlmsg_cancel(msg->skb, hdr); + + return -EMSGSIZE; +} + +/* Caller should hold node lock */ +int __tipc_nl_add_node_links(struct tipc_nl_msg *msg, struct tipc_node 
*node, + u32 *prev_link) +{ + u32 i; + int err; + + for (i = *prev_link; i < MAX_BEARERS; i++) { + *prev_link = i; + + if (!node->links[i]) + continue; + + err = __tipc_nl_add_link(msg, node->links[i]); + if (err) + return err; + } + *prev_link = 0; + + return 0; +} + +int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + struct tipc_node *node; + struct tipc_nl_msg msg; + u32 prev_node = cb->args[0]; + u32 prev_link = cb->args[1]; + int done = cb->args[2]; + int err; + + if (done) + return 0; + + msg.skb = skb; + msg.portid = NETLINK_CB(cb->skb).portid; + msg.seq = cb->nlh->nlmsg_seq; + + rcu_read_lock(); + + if (prev_node) { + node = tipc_node_find(prev_node); + if (!node) { + /* We never set seq or call nl_dump_check_consistent(); + * this means that setting prev_seq here will cause the + * consistency check to fail in the netlink callback + * handler. Resulting in the last NLMSG_DONE message + * having the NLM_F_DUMP_INTR flag set. + */ + cb->prev_seq = 1; + goto out; + } + + list_for_each_entry_continue_rcu(node, &tipc_node_list, list) { + tipc_node_lock(node); + err = __tipc_nl_add_node_links(&msg, node, &prev_link); + tipc_node_unlock(node); + if (err) + goto out; + + prev_node = node->addr; + } + } else { + err = tipc_nl_add_bc_link(&msg); + if (err) + goto out; + + list_for_each_entry_rcu(node, &tipc_node_list, list) { + tipc_node_lock(node); + err = __tipc_nl_add_node_links(&msg, node, &prev_link); + tipc_node_unlock(node); + if (err) + goto out; + + prev_node = node->addr; + } + } + done = 1; +out: + rcu_read_unlock(); + + cb->args[0] = prev_node; + cb->args[1] = prev_link; + cb->args[2] = done; + + return skb->len; +} + +int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info) +{ + struct sk_buff *ans_skb; + struct tipc_nl_msg msg; + struct tipc_link *link; + struct tipc_node *node; + char *name; + int bearer_id; + int err; + + if (!info->attrs[TIPC_NLA_LINK_NAME]) + return -EINVAL; + + name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]); + node = tipc_link_find_owner(name, &bearer_id); + if (!node) + return -EINVAL; + + ans_skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!ans_skb) + return -ENOMEM; + + msg.skb = ans_skb; + msg.portid = info->snd_portid; + msg.seq = info->snd_seq; + + tipc_node_lock(node); + link = node->links[bearer_id]; + if (!link) { + err = -EINVAL; + goto err_out; + } + + err = __tipc_nl_add_link(&msg, link); + if (err) + goto err_out; + + tipc_node_unlock(node); + + return genlmsg_reply(ans_skb, info); + +err_out: + tipc_node_unlock(node); + nlmsg_free(ans_skb); + + return err; +} + +int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info) +{ + int err; + char *link_name; + unsigned int bearer_id; + struct tipc_link *link; + struct tipc_node *node; + struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1]; + + if (!info->attrs[TIPC_NLA_LINK]) + return -EINVAL; + + err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX, + info->attrs[TIPC_NLA_LINK], + tipc_nl_link_policy); + if (err) + return err; + + if (!attrs[TIPC_NLA_LINK_NAME]) + return -EINVAL; + + link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]); + + if (strcmp(link_name, tipc_bclink_name) == 0) { + err = tipc_bclink_reset_stats(); + if (err) + return err; + return 0; + } + + node = tipc_link_find_owner(link_name, &bearer_id); + if (!node) + return -EINVAL; + + tipc_node_lock(node); + + link = node->links[bearer_id]; + if (!link) { + tipc_node_unlock(node); + return -EINVAL; + } + + link_reset_statistics(link); + + tipc_node_unlock(node); + + return 0; +}
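The dump callbacks in this patch (tipc_nl_bearer_dump() and tipc_nl_media_dump() in the earlier hunks, tipc_nl_link_dump() above, and the node, socket and name-table dumps further down) all follow the same resumable pattern: iterate until the supplied skb fills up, park the current position in cb->args[], and return skb->len so the netlink core calls back for the next batch. A minimal sketch of that pattern, assuming a hypothetical demo_fill_one() helper and DEMO_MAX bound (neither exists in the patch):

static int demo_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	u32 i = cb->args[0];	/* position saved by the previous pass, 0 on the first call */

	for (; i < DEMO_MAX; i++) {
		/* demo_fill_one() stands in for __tipc_nl_add_bearer() and
		 * friends; it returns -EMSGSIZE once skb has no room left.
		 */
		if (demo_fill_one(skb, cb, i) < 0)
			break;
	}
	cb->args[0] = i;	/* resume from here on the next invocation */

	/* A non-zero return means "more to come"; the core re-invokes the
	 * callback until it returns 0, then emits NLMSG_DONE.
	 */
	return skb->len;
}

The multi-level cursors in tipc_nl_link_dump() (prev_node, prev_link, done) are the same idea, just with one cb->args[] slot per nesting level.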
diff --git a/net/tipc/link.h b/net/tipc/link.h index b567a3427fda..f463e7be801c 100644 --- a/net/tipc/link.h +++ b/net/tipc/link.h @@ -37,6 +37,7 @@ #ifndef _TIPC_LINK_H #define _TIPC_LINK_H +#include <net/genetlink.h> #include "msg.h" #include "node.h" @@ -239,6 +240,12 @@ void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window); void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *start, u32 retransmits); +int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb); +int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info); +int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info); +int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info); +int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]); + /* * Link sequence number manipulation routines (uses modulo 2**16 arithmetic) */ diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 74745a47d72a..ec18076e81ec 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -91,7 +91,7 @@ struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz, * @*headbuf: in: NULL for first frag, otherwise value returned from prev call * out: set when successful non-complete reassembly, otherwise NULL * @*buf: in: the buffer to append. Always defined - * out: head buf after sucessful complete reassembly, otherwise NULL + * out: head buf after successful complete reassembly, otherwise NULL * Returns 1 when reassembly complete, otherwise 0 */ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) @@ -311,7 +311,7 @@ bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu) * @mtu: max allowable size for the bundle buffer, inclusive header * @dnode: destination node for message. (Not always present in header) * Replaces buffer if successful - * Returns true if sucess, otherwise false + * Returns true if success, otherwise false */ bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode) { diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 3a6a0a7c0759..30ca8e0e0f84 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -1,7 +1,7 @@ /* * net/tipc/name_table.c: TIPC name table code * - * Copyright (c) 2000-2006, Ericsson AB + * Copyright (c) 2000-2006, 2014, Ericsson AB * Copyright (c) 2004-2008, 2010-2011, Wind River Systems * All rights reserved.
* @@ -42,6 +42,12 @@ #define TIPC_NAMETBL_SIZE 1024 /* must be a power of 2 */ +static const struct nla_policy +tipc_nl_name_table_policy[TIPC_NLA_NAME_TABLE_MAX + 1] = { + [TIPC_NLA_NAME_TABLE_UNSPEC] = { .type = NLA_UNSPEC }, + [TIPC_NLA_NAME_TABLE_PUBL] = { .type = NLA_NESTED } +}; + /** * struct name_info - name sequence publication info * @node_list: circular list of publications made by own node @@ -995,3 +1001,185 @@ void tipc_nametbl_stop(void) table.types = NULL; write_unlock_bh(&tipc_nametbl_lock); } + +int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg, struct name_seq *seq, + struct sub_seq *sseq, u32 *last_publ) +{ + void *hdr; + struct nlattr *attrs; + struct nlattr *publ; + struct publication *p; + + if (*last_publ) { + list_for_each_entry(p, &sseq->info->zone_list, zone_list) + if (p->key == *last_publ) + break; + if (p->key != *last_publ) + return -EPIPE; + } else { + p = list_first_entry(&sseq->info->zone_list, struct publication, + zone_list); + } + + list_for_each_entry_from(p, &sseq->info->zone_list, zone_list) { + *last_publ = p->key; + + hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, + &tipc_genl_v2_family, NLM_F_MULTI, + TIPC_NL_NAME_TABLE_GET); + if (!hdr) + return -EMSGSIZE; + + attrs = nla_nest_start(msg->skb, TIPC_NLA_NAME_TABLE); + if (!attrs) + goto msg_full; + + publ = nla_nest_start(msg->skb, TIPC_NLA_NAME_TABLE_PUBL); + if (!publ) + goto attr_msg_full; + + if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_TYPE, seq->type)) + goto publ_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_LOWER, sseq->lower)) + goto publ_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_UPPER, sseq->upper)) + goto publ_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_SCOPE, p->scope)) + goto publ_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_NODE, p->node)) + goto publ_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_REF, p->ref)) + goto publ_msg_full; + if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_KEY, p->key)) + goto publ_msg_full; + + nla_nest_end(msg->skb, publ); + nla_nest_end(msg->skb, attrs); + genlmsg_end(msg->skb, hdr); + } + *last_publ = 0; + + return 0; + +publ_msg_full: + nla_nest_cancel(msg->skb, publ); +attr_msg_full: + nla_nest_cancel(msg->skb, attrs); +msg_full: + genlmsg_cancel(msg->skb, hdr); + + return -EMSGSIZE; +} + +int __tipc_nl_subseq_list(struct tipc_nl_msg *msg, struct name_seq *seq, + u32 *last_lower, u32 *last_publ) +{ + struct sub_seq *sseq; + struct sub_seq *sseq_start; + int err; + + if (*last_lower) { + sseq_start = nameseq_find_subseq(seq, *last_lower); + if (!sseq_start) + return -EPIPE; + } else { + sseq_start = seq->sseqs; + } + + for (sseq = sseq_start; sseq != &seq->sseqs[seq->first_free]; sseq++) { + err = __tipc_nl_add_nametable_publ(msg, seq, sseq, last_publ); + if (err) { + *last_lower = sseq->lower; + return err; + } + } + *last_lower = 0; + + return 0; +} + +int __tipc_nl_seq_list(struct tipc_nl_msg *msg, u32 *last_type, u32 *last_lower, + u32 *last_publ) +{ + struct hlist_head *seq_head; + struct name_seq *seq; + int err; + int i; + + if (*last_type) + i = hash(*last_type); + else + i = 0; + + for (; i < TIPC_NAMETBL_SIZE; i++) { + seq_head = &table.types[i]; + + if (*last_type) { + seq = nametbl_find_seq(*last_type); + if (!seq) + return -EPIPE; + } else { + seq = hlist_entry_safe((seq_head)->first, + struct name_seq, ns_list); + if (!seq) + continue; + } + + hlist_for_each_entry_from(seq, ns_list) { + spin_lock_bh(&seq->lock); + + err = __tipc_nl_subseq_list(msg, seq, last_lower, + last_publ); + + if (err) { + *last_type 
= seq->type; + spin_unlock_bh(&seq->lock); + return err; + } + spin_unlock_bh(&seq->lock); + } + *last_type = 0; + } + return 0; +} + +int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + int err; + int done = cb->args[3]; + u32 last_type = cb->args[0]; + u32 last_lower = cb->args[1]; + u32 last_publ = cb->args[2]; + struct tipc_nl_msg msg; + + if (done) + return 0; + + msg.skb = skb; + msg.portid = NETLINK_CB(cb->skb).portid; + msg.seq = cb->nlh->nlmsg_seq; + + read_lock_bh(&tipc_nametbl_lock); + + err = __tipc_nl_seq_list(&msg, &last_type, &last_lower, &last_publ); + if (!err) { + done = 1; + } else if (err != -EMSGSIZE) { + /* We never set seq or call nl_dump_check_consistent(); this + * means that setting prev_seq here will cause the consistency + * check to fail in the netlink callback handler. Resulting in + * the NLMSG_DONE message having the NLM_F_DUMP_INTR flag set if + * we got an error. + */ + cb->prev_seq = 1; + } + + read_unlock_bh(&tipc_nametbl_lock); + + cb->args[0] = last_type; + cb->args[1] = last_lower; + cb->args[2] = last_publ; + cb->args[3] = done; + + return skb->len; +} diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h index f02f48b9a216..b38ebecac766 100644 --- a/net/tipc/name_table.h +++ b/net/tipc/name_table.h @@ -1,7 +1,7 @@ /* * net/tipc/name_table.h: Include file for TIPC name table code * - * Copyright (c) 2000-2006, Ericsson AB + * Copyright (c) 2000-2006, 2014, Ericsson AB * Copyright (c) 2004-2005, 2010-2011, Wind River Systems * All rights reserved. * @@ -84,6 +84,8 @@ struct publication { extern rwlock_t tipc_nametbl_lock; +int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb); + struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space); u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node); int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, diff --git a/net/tipc/net.c b/net/tipc/net.c index 93b9944a6a8b..cf13df3cde8f 100644 --- a/net/tipc/net.c +++ b/net/tipc/net.c @@ -42,6 +42,11 @@ #include "node.h" #include "config.h" +static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = { + [TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC }, + [TIPC_NLA_NET_ID] = { .type = NLA_U32 } +}; + /* * The TIPC locking policy is designed to ensure a very fine locking * granularity, permitting complete parallel access to individual @@ -138,3 +143,104 @@ void tipc_net_stop(void) pr_info("Left network mode\n"); } + +static int __tipc_nl_add_net(struct tipc_nl_msg *msg) +{ + void *hdr; + struct nlattr *attrs; + + hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family, + NLM_F_MULTI, TIPC_NL_NET_GET); + if (!hdr) + return -EMSGSIZE; + + attrs = nla_nest_start(msg->skb, TIPC_NLA_NET); + if (!attrs) + goto msg_full; + + if (nla_put_u32(msg->skb, TIPC_NLA_NET_ID, tipc_net_id)) + goto attr_msg_full; + + nla_nest_end(msg->skb, attrs); + genlmsg_end(msg->skb, hdr); + + return 0; + +attr_msg_full: + nla_nest_cancel(msg->skb, attrs); +msg_full: + genlmsg_cancel(msg->skb, hdr); + + return -EMSGSIZE; +} + +int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + int err; + int done = cb->args[0]; + struct tipc_nl_msg msg; + + if (done) + return 0; + + msg.skb = skb; + msg.portid = NETLINK_CB(cb->skb).portid; + msg.seq = cb->nlh->nlmsg_seq; + + err = __tipc_nl_add_net(&msg); + if (err) + goto out; + + done = 1; +out: + cb->args[0] = done; + + return skb->len; +} +
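The doit handlers in this patch (tipc_nl_net_set() just below, and the bearer, media and link setters in the earlier hunks) share a second idiom: re-parse the command's nested container with nla_parse_nested() against a per-subsystem policy, then range-check individual attributes before touching any state. A schematic of that shape, with made-up demo_* names standing in for the TIPC_NLA_* ones:

static int demo_nl_set(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attrs[DEMO_ATTR_MAX + 1];
	int err;

	if (!info->attrs[DEMO_ATTR_NEST])
		return -EINVAL;

	/* Validate type and length of each nested attribute against
	 * demo_policy before reading any payload.
	 */
	err = nla_parse_nested(attrs, DEMO_ATTR_MAX,
			       info->attrs[DEMO_ATTR_NEST], demo_policy);
	if (err)
		return err;

	if (!attrs[DEMO_ATTR_VAL])
		return -EINVAL;

	return demo_apply(nla_get_u32(attrs[DEMO_ATTR_VAL]));
}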
+int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info) +{ + int err; + struct nlattr *attrs[TIPC_NLA_NET_MAX + 1]; + + if (!info->attrs[TIPC_NLA_NET]) + return -EINVAL; + + err = nla_parse_nested(attrs, TIPC_NLA_NET_MAX, + info->attrs[TIPC_NLA_NET], + tipc_nl_net_policy); + if (err) + return err; + + if (attrs[TIPC_NLA_NET_ID]) { + u32 val; + + /* Can't change net id once TIPC has joined a network */ + if (tipc_own_addr) + return -EPERM; + + val = nla_get_u32(attrs[TIPC_NLA_NET_ID]); + if (val < 1 || val > 9999) + return -EINVAL; + + tipc_net_id = val; + } + + if (attrs[TIPC_NLA_NET_ADDR]) { + u32 addr; + + /* Can't change net addr once TIPC has joined a network */ + if (tipc_own_addr) + return -EPERM; + + addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]); + if (!tipc_addr_node_valid(addr)) + return -EINVAL; + + rtnl_lock(); + tipc_net_start(addr); + rtnl_unlock(); + } + + return 0; +} diff --git a/net/tipc/net.h b/net/tipc/net.h index 59ef3388be2c..a81c1b9eb150 100644 --- a/net/tipc/net.h +++ b/net/tipc/net.h @@ -1,7 +1,7 @@ /* * net/tipc/net.h: Include file for TIPC network routing code * - * Copyright (c) 1995-2006, Ericsson AB + * Copyright (c) 1995-2006, 2014, Ericsson AB * Copyright (c) 2005, 2010-2011, Wind River Systems * All rights reserved. * @@ -37,7 +37,13 @@ #ifndef _TIPC_NET_H #define _TIPC_NET_H +#include <net/genetlink.h> + int tipc_net_start(u32 addr); + void tipc_net_stop(void); +int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb); +int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info); + #endif diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c index ad844d365340..b891e3905bc4 100644 --- a/net/tipc/netlink.c +++ b/net/tipc/netlink.c @@ -1,7 +1,7 @@ /* * net/tipc/netlink.c: TIPC configuration handling * - * Copyright (c) 2005-2006, Ericsson AB + * Copyright (c) 2005-2006, 2014, Ericsson AB * Copyright (c) 2005-2007, Wind River Systems * All rights reserved. * @@ -36,6 +36,12 @@ #include "core.h" #include "config.h" +#include "socket.h" +#include "name_table.h" +#include "bearer.h" +#include "link.h" +#include "node.h" +#include "net.h" #include <net/genetlink.h> static int handle_cmd(struct sk_buff *skb, struct genl_info *info) @@ -68,6 +74,19 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info) return 0; } +static const struct nla_policy tipc_nl_policy[TIPC_NLA_MAX + 1] = { + [TIPC_NLA_UNSPEC] = { .type = NLA_UNSPEC, }, + [TIPC_NLA_BEARER] = { .type = NLA_NESTED, }, + [TIPC_NLA_SOCK] = { .type = NLA_NESTED, }, + [TIPC_NLA_PUBL] = { .type = NLA_NESTED, }, + [TIPC_NLA_LINK] = { .type = NLA_NESTED, }, + [TIPC_NLA_MEDIA] = { .type = NLA_NESTED, }, + [TIPC_NLA_NODE] = { .type = NLA_NESTED, }, + [TIPC_NLA_NET] = { .type = NLA_NESTED, }, + [TIPC_NLA_NAME_TABLE] = { .type = NLA_NESTED, } +}; + +/* Legacy ASCII API */ static struct genl_family tipc_genl_family = { .id = GENL_ID_GENERATE, .name = TIPC_GENL_NAME, @@ -76,6 +95,7 @@ static struct genl_family tipc_genl_family = { .maxattr = 0, }; +/* Legacy ASCII API */ static struct genl_ops tipc_genl_ops[] = { { .cmd = TIPC_GENL_CMD, @@ -83,12 +103,122 @@ static struct genl_ops tipc_genl_ops[] = { }, }; +/* Users of the legacy API (tipc-config) can't handle that we add operations, + * so we have a separate genl handling for the new API.
+ */ +struct genl_family tipc_genl_v2_family = { + .id = GENL_ID_GENERATE, + .name = TIPC_GENL_V2_NAME, + .version = TIPC_GENL_V2_VERSION, + .hdrsize = 0, + .maxattr = TIPC_NLA_MAX, +}; + +static const struct genl_ops tipc_genl_v2_ops[] = { + { + .cmd = TIPC_NL_BEARER_DISABLE, + .doit = tipc_nl_bearer_disable, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_BEARER_ENABLE, + .doit = tipc_nl_bearer_enable, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_BEARER_GET, + .doit = tipc_nl_bearer_get, + .dumpit = tipc_nl_bearer_dump, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_BEARER_SET, + .doit = tipc_nl_bearer_set, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_SOCK_GET, + .dumpit = tipc_nl_sk_dump, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_PUBL_GET, + .dumpit = tipc_nl_publ_dump, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_LINK_GET, + .doit = tipc_nl_link_get, + .dumpit = tipc_nl_link_dump, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_LINK_SET, + .doit = tipc_nl_link_set, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_LINK_RESET_STATS, + .doit = tipc_nl_link_reset_stats, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_MEDIA_GET, + .doit = tipc_nl_media_get, + .dumpit = tipc_nl_media_dump, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_MEDIA_SET, + .doit = tipc_nl_media_set, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_NODE_GET, + .dumpit = tipc_nl_node_dump, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_NET_GET, + .dumpit = tipc_nl_net_dump, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_NET_SET, + .doit = tipc_nl_net_set, + .policy = tipc_nl_policy, + }, + { + .cmd = TIPC_NL_NAME_TABLE_GET, + .dumpit = tipc_nl_name_table_dump, + .policy = tipc_nl_policy, + } +}; + +int tipc_nlmsg_parse(const struct nlmsghdr *nlh, struct nlattr ***attr) +{ + u32 maxattr = tipc_genl_v2_family.maxattr; + + *attr = tipc_genl_v2_family.attrbuf; + if (!*attr) + return -EOPNOTSUPP; + + return nlmsg_parse(nlh, GENL_HDRLEN, *attr, maxattr, tipc_nl_policy); +} + int tipc_netlink_start(void) { int res; res = genl_register_family_with_ops(&tipc_genl_family, tipc_genl_ops); if (res) { + pr_err("Failed to register legacy interface\n"); + return res; + } + + res = genl_register_family_with_ops(&tipc_genl_v2_family, + tipc_genl_v2_ops); + if (res) { pr_err("Failed to register netlink interface\n"); return res; } @@ -98,4 +228,5 @@ int tipc_netlink_start(void) void tipc_netlink_stop(void) { genl_unregister_family(&tipc_genl_family); + genl_unregister_family(&tipc_genl_v2_family); } diff --git a/net/tipc/netlink.h b/net/tipc/netlink.h new file mode 100644 index 000000000000..1425c6869de0 --- /dev/null +++ b/net/tipc/netlink.h @@ -0,0 +1,48 @@ +/* + * net/tipc/netlink.h: Include file for TIPC netlink code + * + * Copyright (c) 2014, Ericsson AB + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the names of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _TIPC_NETLINK_H +#define _TIPC_NETLINK_H + +extern struct genl_family tipc_genl_v2_family; +int tipc_nlmsg_parse(const struct nlmsghdr *nlh, struct nlattr ***buf); + +struct tipc_nl_msg { + struct sk_buff *skb; + u32 portid; + u32 seq; +}; + +#endif diff --git a/net/tipc/node.c b/net/tipc/node.c index 90cee4a6fce4..72a75d4838b2 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -58,6 +58,12 @@ struct tipc_sock_conn { struct list_head list; }; +static const struct nla_policy tipc_nl_node_policy[TIPC_NLA_NODE_MAX + 1] = { + [TIPC_NLA_NODE_UNSPEC] = { .type = NLA_UNSPEC }, + [TIPC_NLA_NODE_ADDR] = { .type = NLA_U32 }, + [TIPC_NLA_NODE_UP] = { .type = NLA_FLAG } +}; + /* * A trivial power-of-two bitmask technique is used for speed, since this * operation is done for every incoming TIPC packet. 
The number of hash table @@ -219,11 +225,11 @@ void tipc_node_abort_sock_conns(struct list_head *conns) void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr) { struct tipc_link **active = &n_ptr->active_links[0]; - u32 addr = n_ptr->addr; n_ptr->working_links++; - tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr, TIPC_NODE_SCOPE, - l_ptr->bearer_id, addr); + n_ptr->action_flags |= TIPC_NOTIFY_LINK_UP; + n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id; + pr_info("Established link <%s> on network plane %c\n", l_ptr->name, l_ptr->net_plane); @@ -284,10 +290,10 @@ static void node_select_active_links(struct tipc_node *n_ptr) void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr) { struct tipc_link **active; - u32 addr = n_ptr->addr; n_ptr->working_links--; - tipc_nametbl_withdraw(TIPC_LINK_STATE, addr, l_ptr->bearer_id, addr); + n_ptr->action_flags |= TIPC_NOTIFY_LINK_DOWN; + n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id; if (!tipc_link_is_active(l_ptr)) { pr_info("Lost standby link <%s> on network plane %c\n", @@ -552,28 +558,30 @@ void tipc_node_unlock(struct tipc_node *node) LIST_HEAD(conn_sks); struct sk_buff_head waiting_sks; u32 addr = 0; - unsigned int flags = node->action_flags; + int flags = node->action_flags; + u32 link_id = 0; - if (likely(!node->action_flags)) { + if (likely(!flags)) { spin_unlock_bh(&node->lock); return; } + addr = node->addr; + link_id = node->link_id; __skb_queue_head_init(&waiting_sks); - if (node->action_flags & TIPC_WAKEUP_USERS) { + + if (flags & TIPC_WAKEUP_USERS) skb_queue_splice_init(&node->waiting_sks, &waiting_sks); - node->action_flags &= ~TIPC_WAKEUP_USERS; - } - if (node->action_flags & TIPC_NOTIFY_NODE_DOWN) { + + if (flags & TIPC_NOTIFY_NODE_DOWN) { list_replace_init(&node->nsub, &nsub_list); list_replace_init(&node->conn_sks, &conn_sks); - node->action_flags &= ~TIPC_NOTIFY_NODE_DOWN; } - if (node->action_flags & TIPC_NOTIFY_NODE_UP) { - node->action_flags &= ~TIPC_NOTIFY_NODE_UP; - addr = node->addr; - } - node->action_flags &= ~TIPC_WAKEUP_BCAST_USERS; + node->action_flags &= ~(TIPC_WAKEUP_USERS | TIPC_NOTIFY_NODE_DOWN | + TIPC_NOTIFY_NODE_UP | TIPC_NOTIFY_LINK_UP | + TIPC_NOTIFY_LINK_DOWN | + TIPC_WAKEUP_BCAST_USERS); + spin_unlock_bh(&node->lock); while (!skb_queue_empty(&waiting_sks)) @@ -588,6 +596,104 @@ void tipc_node_unlock(struct tipc_node *node) if (flags & TIPC_WAKEUP_BCAST_USERS) tipc_bclink_wakeup_users(); - if (addr) + if (flags & TIPC_NOTIFY_NODE_UP) tipc_named_node_up(addr); + + if (flags & TIPC_NOTIFY_LINK_UP) + tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr, + TIPC_NODE_SCOPE, link_id, addr); + + if (flags & TIPC_NOTIFY_LINK_DOWN) + tipc_nametbl_withdraw(TIPC_LINK_STATE, addr, + link_id, addr); +} + +/* Caller should hold node lock for the passed node */ +int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node) +{ + void *hdr; + struct nlattr *attrs; + + hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family, + NLM_F_MULTI, TIPC_NL_NODE_GET); + if (!hdr) + return -EMSGSIZE; + + attrs = nla_nest_start(msg->skb, TIPC_NLA_NODE); + if (!attrs) + goto msg_full; + + if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr)) + goto attr_msg_full; + if (tipc_node_is_up(node)) + if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP)) + goto attr_msg_full; + + nla_nest_end(msg->skb, attrs); + genlmsg_end(msg->skb, hdr); + + return 0; + +attr_msg_full: + nla_nest_cancel(msg->skb, attrs); +msg_full: + genlmsg_cancel(msg->skb, hdr); + + return 
-EMSGSIZE; +} + +int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + int err; + int done = cb->args[0]; + int last_addr = cb->args[1]; + struct tipc_node *node; + struct tipc_nl_msg msg; + + if (done) + return 0; + + msg.skb = skb; + msg.portid = NETLINK_CB(cb->skb).portid; + msg.seq = cb->nlh->nlmsg_seq; + + rcu_read_lock(); + + if (last_addr && !tipc_node_find(last_addr)) { + rcu_read_unlock(); + /* We never set seq or call nl_dump_check_consistent(); this + * means that setting prev_seq here will cause the consistency + * check to fail in the netlink callback handler. Resulting in + * the NLMSG_DONE message having the NLM_F_DUMP_INTR flag set if + * the node state changed while we released the lock. + */ + cb->prev_seq = 1; + return -EPIPE; + } + + list_for_each_entry_rcu(node, &tipc_node_list, list) { + if (last_addr) { + if (node->addr == last_addr) + last_addr = 0; + else + continue; + } + + tipc_node_lock(node); + err = __tipc_nl_add_node(&msg, node); + if (err) { + last_addr = node->addr; + tipc_node_unlock(node); + goto out; + } + + tipc_node_unlock(node); + } + done = 1; +out: + cb->args[0] = done; + cb->args[1] = last_addr; + rcu_read_unlock(); + + return skb->len; } diff --git a/net/tipc/node.h b/net/tipc/node.h index 67513c3c852c..005fbcef3212 100644 --- a/net/tipc/node.h +++ b/net/tipc/node.h @@ -1,7 +1,7 @@ /* * net/tipc/node.h: Include file for TIPC node management routines * - * Copyright (c) 2000-2006, Ericsson AB + * Copyright (c) 2000-2006, 2014, Ericsson AB * Copyright (c) 2005, 2010-2014, Wind River Systems * All rights reserved. * @@ -53,6 +53,7 @@ * TIPC_WAIT_OWN_LINKS_DOWN: wait until peer node is declared down * TIPC_NOTIFY_NODE_DOWN: notify node is down * TIPC_NOTIFY_NODE_UP: notify node is up + * TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type */ enum { TIPC_WAIT_PEER_LINKS_DOWN = (1 << 1), @@ -60,7 +61,9 @@ enum { TIPC_NOTIFY_NODE_DOWN = (1 << 3), TIPC_NOTIFY_NODE_UP = (1 << 4), TIPC_WAKEUP_USERS = (1 << 5), - TIPC_WAKEUP_BCAST_USERS = (1 << 6) + TIPC_WAKEUP_BCAST_USERS = (1 << 6), + TIPC_NOTIFY_LINK_UP = (1 << 7), + TIPC_NOTIFY_LINK_DOWN = (1 << 8) }; /** @@ -100,6 +103,7 @@ struct tipc_node_bclink { * @working_links: number of working links to node (both active and standby) * @link_cnt: number of links to node * @signature: node instance identifier + * @link_id: local and remote bearer ids of changing link, if any * @nsub: list of "node down" subscriptions monitoring node * @rcu: rcu struct for tipc_node */ @@ -116,6 +120,7 @@ struct tipc_node { int link_cnt; int working_links; u32 signature; + u32 link_id; struct list_head nsub; struct sk_buff_head waiting_sks; struct list_head conn_sks; @@ -140,6 +145,8 @@ void tipc_node_unlock(struct tipc_node *node); int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port); void tipc_node_remove_conn(u32 dnode, u32 port); +int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb); + static inline void tipc_node_lock(struct tipc_node *node) { spin_lock_bh(&node->lock); diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 75275c5cf929..e91809182c28 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -121,6 +121,14 @@ static const struct proto_ops msg_ops; static struct proto tipc_proto; static struct proto tipc_proto_kern; +static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = { + [TIPC_NLA_SOCK_UNSPEC] = { .type = NLA_UNSPEC }, + [TIPC_NLA_SOCK_ADDR] = { .type = NLA_U32 }, + [TIPC_NLA_SOCK_REF] = { .type = NLA_U32 }, +
[TIPC_NLA_SOCK_CON] = { .type = NLA_NESTED }, + [TIPC_NLA_SOCK_HAS_PUBL] = { .type = NLA_FLAG } +}; + /* * Revised TIPC socket locking policy: * @@ -1372,8 +1380,7 @@ restart: sz = buf_len; m->msg_flags |= MSG_TRUNC; } - res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg), - m->msg_iov, sz); + res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg), m, sz); if (res) goto exit; res = sz; @@ -1473,8 +1480,8 @@ restart: needed = (buf_len - sz_copied); sz_to_copy = (sz <= needed) ? sz : needed; - res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg) + offset, - m->msg_iov, sz_to_copy); + res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg) + offset, + m, sz_to_copy); if (res) goto exit; @@ -1556,7 +1563,7 @@ static void tipc_data_ready(struct sock *sk) * @tsk: TIPC socket * @msg: message * - * Returns 0 (TIPC_OK) if everyting ok, -TIPC_ERR_NO_PORT otherwise + * Returns 0 (TIPC_OK) if everything ok, -TIPC_ERR_NO_PORT otherwise */ static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf) { @@ -1776,7 +1783,7 @@ int tipc_sk_rcv(struct sk_buff *buf) sk = &tsk->sk; /* Queue message */ - bh_lock_sock(sk); + spin_lock_bh(&sk->sk_lock.slock); if (!sock_owned_by_user(sk)) { rc = filter_rcv(sk, buf); @@ -1787,7 +1794,7 @@ int tipc_sk_rcv(struct sk_buff *buf) if (sk_add_backlog(sk, buf, limit)) rc = -TIPC_ERR_OVERLOAD; } - bh_unlock_sock(sk); + spin_unlock_bh(&sk->sk_lock.slock); tipc_sk_put(tsk); if (likely(!rc)) return 0; @@ -2673,7 +2680,7 @@ static int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg) case SIOCGETLINKNAME: if (copy_from_user(&lnr, argp, sizeof(lnr))) return -EFAULT; - if (!tipc_node_get_linkname(lnr.bearer_id, lnr.peer, + if (!tipc_node_get_linkname(lnr.bearer_id & 0xffff, lnr.peer, lnr.linkname, TIPC_MAX_LINK_NAME)) { if (copy_to_user(argp, &lnr, sizeof(lnr))) return -EFAULT; @@ -2802,3 +2809,231 @@ void tipc_socket_stop(void) sock_unregister(tipc_family_ops.family); proto_unregister(&tipc_proto); } + +/* Caller should hold socket lock for the passed tipc socket. */ +int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk) +{ + u32 peer_node; + u32 peer_port; + struct nlattr *nest; + + peer_node = tsk_peer_node(tsk); + peer_port = tsk_peer_port(tsk); + + nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON); + + if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node)) + goto msg_full; + if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port)) + goto msg_full; + + if (tsk->conn_type != 0) { + if (nla_put_flag(skb, TIPC_NLA_CON_FLAG)) + goto msg_full; + if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type)) + goto msg_full; + if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance)) + goto msg_full; + } + nla_nest_end(skb, nest); + + return 0; + +msg_full: + nla_nest_cancel(skb, nest); + + return -EMSGSIZE; +} + +/* Caller should hold socket lock for the passed tipc socket. 
*/ +int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb, + struct tipc_sock *tsk) +{ + int err; + void *hdr; + struct nlattr *attrs; + + hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + &tipc_genl_v2_family, NLM_F_MULTI, TIPC_NL_SOCK_GET); + if (!hdr) + goto msg_cancel; + + attrs = nla_nest_start(skb, TIPC_NLA_SOCK); + if (!attrs) + goto genlmsg_cancel; + if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->ref)) + goto attr_msg_cancel; + if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr)) + goto attr_msg_cancel; + + if (tsk->connected) { + err = __tipc_nl_add_sk_con(skb, tsk); + if (err) + goto attr_msg_cancel; + } else if (!list_empty(&tsk->publications)) { + if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL)) + goto attr_msg_cancel; + } + nla_nest_end(skb, attrs); + genlmsg_end(skb, hdr); + + return 0; + +attr_msg_cancel: + nla_nest_cancel(skb, attrs); +genlmsg_cancel: + genlmsg_cancel(skb, hdr); +msg_cancel: + return -EMSGSIZE; +} + +int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + int err; + struct tipc_sock *tsk; + u32 prev_ref = cb->args[0]; + u32 ref = prev_ref; + + tsk = tipc_sk_get_next(&ref); + for (; tsk; tsk = tipc_sk_get_next(&ref)) { + lock_sock(&tsk->sk); + err = __tipc_nl_add_sk(skb, cb, tsk); + release_sock(&tsk->sk); + tipc_sk_put(tsk); + if (err) + break; + + prev_ref = ref; + } + + cb->args[0] = prev_ref; + + return skb->len; +} + +/* Caller should hold socket lock for the passed tipc socket. */ +int __tipc_nl_add_sk_publ(struct sk_buff *skb, struct netlink_callback *cb, + struct publication *publ) +{ + void *hdr; + struct nlattr *attrs; + + hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + &tipc_genl_v2_family, NLM_F_MULTI, TIPC_NL_PUBL_GET); + if (!hdr) + goto msg_cancel; + + attrs = nla_nest_start(skb, TIPC_NLA_PUBL); + if (!attrs) + goto genlmsg_cancel; + + if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key)) + goto attr_msg_cancel; + if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type)) + goto attr_msg_cancel; + if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower)) + goto attr_msg_cancel; + if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper)) + goto attr_msg_cancel; + + nla_nest_end(skb, attrs); + genlmsg_end(skb, hdr); + + return 0; + +attr_msg_cancel: + nla_nest_cancel(skb, attrs); +genlmsg_cancel: + genlmsg_cancel(skb, hdr); +msg_cancel: + return -EMSGSIZE; +} + +/* Caller should hold socket lock for the passed tipc socket. */ +int __tipc_nl_list_sk_publ(struct sk_buff *skb, struct netlink_callback *cb, + struct tipc_sock *tsk, u32 *last_publ) +{ + int err; + struct publication *p; + + if (*last_publ) { + list_for_each_entry(p, &tsk->publications, pport_list) { + if (p->key == *last_publ) + break; + } + if (p->key != *last_publ) { + /* We never set seq or call nl_dump_check_consistent(); + * this means that setting prev_seq here will cause the + * consistency check to fail in the netlink callback + * handler. Resulting in the last NLMSG_DONE message + * having the NLM_F_DUMP_INTR flag set.
+ */ + cb->prev_seq = 1; + *last_publ = 0; + return -EPIPE; + } + } else { + p = list_first_entry(&tsk->publications, struct publication, + pport_list); + } + + list_for_each_entry_from(p, &tsk->publications, pport_list) { + err = __tipc_nl_add_sk_publ(skb, cb, p); + if (err) { + *last_publ = p->key; + return err; + } + } + *last_publ = 0; + + return 0; +} + +int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb) +{ + int err; + u32 tsk_ref = cb->args[0]; + u32 last_publ = cb->args[1]; + u32 done = cb->args[2]; + struct tipc_sock *tsk; + + if (!tsk_ref) { + struct nlattr **attrs; + struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1]; + + err = tipc_nlmsg_parse(cb->nlh, &attrs); + if (err) + return err; + + err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, + attrs[TIPC_NLA_SOCK], + tipc_nl_sock_policy); + if (err) + return err; + + if (!sock[TIPC_NLA_SOCK_REF]) + return -EINVAL; + + tsk_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]); + } + + if (done) + return 0; + + tsk = tipc_sk_get(tsk_ref); + if (!tsk) + return -EINVAL; + + lock_sock(&tsk->sk); + err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ); + if (!err) + done = 1; + release_sock(&tsk->sk); + tipc_sk_put(tsk); + + cb->args[0] = tsk_ref; + cb->args[1] = last_publ; + cb->args[2] = done; + + return skb->len; +} diff --git a/net/tipc/socket.h b/net/tipc/socket.h index baa43d03901e..d34089387006 100644 --- a/net/tipc/socket.h +++ b/net/tipc/socket.h @@ -36,6 +36,7 @@ #define _TIPC_SOCK_H #include <net/sock.h> +#include <net/genetlink.h> #define TIPC_CONNACK_INTV 256 #define TIPC_FLOWCTRL_WIN (TIPC_CONNACK_INTV * 2) @@ -47,5 +48,7 @@ void tipc_sk_mcast_rcv(struct sk_buff *buf); void tipc_sk_reinit(void); int tipc_sk_ref_table_init(u32 requested_size, u32 start); void tipc_sk_ref_table_stop(void); +int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb); +int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb); #endif diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index e96884380732..5eee625d113f 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1825,7 +1825,7 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock, else if (size < skb->len - skip) msg->msg_flags |= MSG_TRUNC; - err = skb_copy_datagram_iovec(skb, skip, msg->msg_iov, size); + err = skb_copy_datagram_msg(skb, skip, msg, size); if (err) goto out_free; @@ -2030,8 +2030,8 @@ again: } chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size); - if (skb_copy_datagram_iovec(skb, UNIXCB(skb).consumed + skip, - msg->msg_iov, chunk)) { + if (skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip, + msg, chunk)) { if (copied == 0) copied = -EFAULT; break; diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index 9bb63ffec4f2..a57ddef7d5af 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c @@ -1773,8 +1773,7 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb, } /* Place the datagram payload in the user's iovec. 
*/ - err = skb_copy_datagram_iovec(skb, sizeof(*dg), msg->msg_iov, - payload_len); + err = skb_copy_datagram_msg(skb, sizeof(*dg), msg, payload_len); if (err) goto out; diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 5ad4418ef093..59e785bfde65 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -1335,7 +1335,7 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock, /* Currently, each datagram always contains a complete record */ msg->msg_flags |= MSG_EOR; - rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + rc = skb_copy_datagram_msg(skb, 0, msg, copied); if (rc) goto out_free_dgram; diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index 499d6c18a8ce..7c532856b398 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c @@ -157,6 +157,8 @@ static int xfrm_output_gso(struct sk_buff *skb) kfree_skb(skb); if (IS_ERR(segs)) return PTR_ERR(segs); + if (segs == NULL) + return -EINVAL; do { struct sk_buff *nskb = segs->next; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 4c4e457e7888..88bf289abdc9 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -1962,7 +1962,7 @@ static int xdst_queue_output(struct sock *sk, struct sk_buff *skb) struct xfrm_policy *pol = xdst->pols[0]; struct xfrm_policy_queue *pq = &pol->polq; - if (unlikely(skb_fclone_busy(skb))) { + if (unlikely(skb_fclone_busy(sk, skb))) { kfree_skb(skb); return 0; }
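One idiom runs through all of the message builders added in the TIPC hunks above: every attribute group opened with nla_nest_start() is either committed with nla_nest_end() or rolled back with nla_nest_cancel() (and the whole message with genlmsg_cancel()) on the -EMSGSIZE path, so a partially built nest never reaches userspace. A schematic of that unwinding, with a hypothetical DEMO_ATTR_* numbering in place of the real TIPC_NLA_* ids:

static int demo_fill_msg(struct sk_buff *skb, u32 val)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, DEMO_ATTR_NEST);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u32(skb, DEMO_ATTR_VAL, val))
		goto msg_full;		/* skb out of room: undo the group */

	nla_nest_end(skb, nest);	/* patch in the nest's final length */
	return 0;

msg_full:
	nla_nest_cancel(skb, nest);	/* trim the partial nest off the skb */
	return -EMSGSIZE;
}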