From c0e30763f7ef9c7b7ff663204c9439bdbcac72ca Mon Sep 17 00:00:00 2001 From: Javier Lopez Date: Mon, 16 Dec 2013 10:01:07 -0800 Subject: mac80211_hwsim: Fix NULL pointer dereference mac80211_hwsim was crashing when receiving tx information from user space. The crash happens because txi->rate_driver_data[0] points to an invalid memory address. This code path is only used by wmediumd, and wmediumd doesn't provide multiple channel support, so we can pass the channel struct (data2->channel) directly to the mac80211_hwsim_monitor_ack function. Signed-off-by: Javier Lopez Signed-off-by: Johannes Berg --- drivers/net/wireless/mac80211_hwsim.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index c72438bb2faf..a1b32ee9594a 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -2011,7 +2011,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, (hwsim_flags & HWSIM_TX_STAT_ACK)) { if (skb->len >= 16) { hdr = (struct ieee80211_hdr *) skb->data; - mac80211_hwsim_monitor_ack(txi->rate_driver_data[0], + mac80211_hwsim_monitor_ack(data2->channel, hdr->addr2); } txi->flags |= IEEE80211_TX_STAT_ACK; -- cgit v1.2.3 From 277d916fc2e959c3f106904116bb4f7b1148d47a Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Mon, 16 Dec 2013 21:39:50 +0100 Subject: mac80211: move "bufferable MMPDU" check to fix AP mode scan The check needs to apply to both multicast and unicast packets, otherwise probe requests on AP mode scans are sent through the multicast buffer queue, which adds long delays (often longer than the scanning interval). Cc: stable@vger.kernel.org Signed-off-by: Felix Fietkau Signed-off-by: Johannes Berg --- net/mac80211/tx.c | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index c558b246ef00..ca7fa7f0613d 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -463,7 +463,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) { struct sta_info *sta = tx->sta; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; struct ieee80211_local *local = tx->local; if (unlikely(!sta)) @@ -474,15 +473,6 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) { int ac = skb_get_queue_mapping(tx->skb); - /* only deauth, disassoc and action are bufferable MMPDUs */ - if (ieee80211_is_mgmt(hdr->frame_control) && - !ieee80211_is_deauth(hdr->frame_control) && - !ieee80211_is_disassoc(hdr->frame_control) && - !ieee80211_is_action(hdr->frame_control)) { - info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER; - return TX_CONTINUE; - } - ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n", sta->sta.addr, sta->sta.aid, ac); if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) @@ -525,9 +515,22 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) static ieee80211_tx_result debug_noinline ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; + if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED)) return TX_CONTINUE; + /* only deauth, disassoc and action are bufferable MMPDUs */ + if (ieee80211_is_mgmt(hdr->frame_control) && + !ieee80211_is_deauth(hdr->frame_control) && + !ieee80211_is_disassoc(hdr->frame_control) && +
!ieee80211_is_action(hdr->frame_control)) { + if (tx->flags & IEEE80211_TX_UNICAST) + info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER; + return TX_CONTINUE; + } + if (tx->flags & IEEE80211_TX_UNICAST) return ieee80211_tx_h_unicast_ps_buf(tx); else -- cgit v1.2.3 From efc5520c4224479967b7381775a63fb48283b082 Mon Sep 17 00:00:00 2001 From: Oren Givon Date: Thu, 19 Dec 2013 05:07:21 +0200 Subject: iwlwifi: add new devices for 7265 series Add new device / subdevice ID for 7265 series. Fix 2 mistakes on the way. Signed-off-by: Oren Givon Reviewed-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/iwlwifi/pcie/drv.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 86605027c41d..e6272546395a 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -357,21 +357,27 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x5012, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x500A, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)}, #endif /* CONFIG_IWLMVM */ -- cgit v1.2.3 From d31652a26bc9e752a99b6ac3b61353129934e451 Mon Sep 17 00:00:00 2001 From: Arron Wang Date: Thu, 14 Nov 2013 17:03:41 +0800 Subject: NFC: Fix target mode p2p link establishment With commit e29a9e2ae165620d, we set the active_target pointer from nfc_dep_link_is_up() in order to support the case where the target detection and the DEP link setting are done atomically by the driver. That can only happen in initiator mode, so we need to check for that otherwise we fail to bring a p2p link in target mode. 
Signed-off-by: Arron Wang Signed-off-by: Samuel Ortiz --- net/nfc/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/nfc/core.c b/net/nfc/core.c index 872529105abc..83b9927e7d19 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -384,7 +384,7 @@ int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx, { dev->dep_link_up = true; - if (!dev->active_target) { + if (!dev->active_target && rf_mode == NFC_RF_INITIATOR) { struct nfc_target *target; target = nfc_find_target(dev, target_idx); -- cgit v1.2.3 From 2690d97ade05c5325cbf7c72b94b90d265659886 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 31 Dec 2013 16:28:39 +0100 Subject: netfilter: nf_nat: fix access to uninitialized buffer in IRC NAT helper Commit 5901b6be885e attempted to introduce IPv6 support into the IRC NAT helper. In doing so, the following code seems to have been removed by accident: ip = ntohl(exp->master->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip); sprintf(buffer, "%u %u", ip, port); pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n", buffer, &ip, port); As a result, buffer[] was left uninitialized and contained some stack value. When we call nf_nat_mangle_tcp_packet(), we call strlen(buffer) on exactly this uninitialized buffer. If we are unlucky and the skb has enough tailroom, we write values that sit on our stack into the packet (and thus leak them) and send that out to the receiver. Since the rather informal DCC spec [1] does not seem to specify IPv6 support right now, we log such occurrences so that admins can act accordingly, and drop the packet. I've looked into the XChat source, and IPv6 is not supported there: addresses are kept in a u32 and printed via a %u format string. Therefore, restore the old IPv4 behaviour and use snprintf(); the IRC helper does not support IPv6 for now. By this, we can safely use strlen(buffer) in nf_nat_mangle_tcp_packet() and prevent a buffer overflow. Also simplify some code as we now have the ct variable anyway. [1] http://www.irchelp.org/irchelp/rfc/ctcpspec.html Fixes: 5901b6be885e ("netfilter: nf_nat: support IPv6 in IRC NAT helper") Signed-off-by: Daniel Borkmann Cc: Harald Welte Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_nat_irc.c | 32 +++++++++++++++++++++++++++----- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/net/netfilter/nf_nat_irc.c b/net/netfilter/nf_nat_irc.c index f02b3605823e..1fb2258c3535 100644 --- a/net/netfilter/nf_nat_irc.c +++ b/net/netfilter/nf_nat_irc.c @@ -34,10 +34,14 @@ static unsigned int help(struct sk_buff *skb, struct nf_conntrack_expect *exp) { char buffer[sizeof("4294967296 65635")]; + struct nf_conn *ct = exp->master; + union nf_inet_addr newaddr; u_int16_t port; unsigned int ret; /* Reply comes from server.
*/ + newaddr = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3; + exp->saved_proto.tcp.port = exp->tuple.dst.u.tcp.port; exp->dir = IP_CT_DIR_REPLY; exp->expectfn = nf_nat_follow_master; @@ -57,17 +61,35 @@ static unsigned int help(struct sk_buff *skb, } if (port == 0) { - nf_ct_helper_log(skb, exp->master, "all ports in use"); + nf_ct_helper_log(skb, ct, "all ports in use"); return NF_DROP; } - ret = nf_nat_mangle_tcp_packet(skb, exp->master, ctinfo, - protoff, matchoff, matchlen, buffer, - strlen(buffer)); + /* strlen("\1DCC CHAT chat AAAAAAAA P\1\n")=27 + * strlen("\1DCC SCHAT chat AAAAAAAA P\1\n")=28 + * strlen("\1DCC SEND F AAAAAAAA P S\1\n")=26 + * strlen("\1DCC MOVE F AAAAAAAA P S\1\n")=26 + * strlen("\1DCC TSEND F AAAAAAAA P S\1\n")=27 + * + * AAAAAAAAA: bound addr (1.0.0.0==16777216, min 8 digits, + * 255.255.255.255==4294967296, 10 digits) + * P: bound port (min 1 d, max 5d (65635)) + * F: filename (min 1 d ) + * S: size (min 1 d ) + * 0x01, \n: terminators + */ + /* AAA = "us", ie. where server normally talks to. */ + snprintf(buffer, sizeof(buffer), "%u %u", ntohl(newaddr.ip), port); + pr_debug("nf_nat_irc: inserting '%s' == %pI4, port %u\n", + buffer, &newaddr.ip, port); + + ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff, matchoff, + matchlen, buffer, strlen(buffer)); if (ret != NF_ACCEPT) { - nf_ct_helper_log(skb, exp->master, "cannot mangle packet"); + nf_ct_helper_log(skb, ct, "cannot mangle packet"); nf_ct_unexpect_related(exp); } + return ret; } -- cgit v1.2.3 From f2661adc0c134d890d84c32d7cb54a2b4d1f0a5f Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Sat, 4 Jan 2014 14:10:43 +0100 Subject: netfilter: only warn once on wrong seqadj usage Avoid potentially spamming the kernel log with WARN splash messages when catching wrong usage of seqadj, by simply using WARN_ONCE. This is a followup to commit db12cf274353 (netfilter: WARN about wrong usage of sequence number adjustments) Suggested-by: Flavio Leitner Suggested-by: Daniel Borkmann Suggested-by: Florian Westphal Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_seqadj.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c index b2d38da67822..f6e2ae91a80b 100644 --- a/net/netfilter/nf_conntrack_seqadj.c +++ b/net/netfilter/nf_conntrack_seqadj.c @@ -37,7 +37,7 @@ int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo, return 0; if (unlikely(!seqadj)) { - WARN(1, "Wrong seqadj usage, missing nfct_seqadj_ext_add()\n"); + WARN_ONCE(1, "Missing nfct_seqadj_ext_add() setup call\n"); return 0; } -- cgit v1.2.3 From 657e5d19657542631461e72fdc375b1e83e72070 Mon Sep 17 00:00:00 2001 From: Li RongQing Date: Tue, 7 Jan 2014 15:39:43 +0800 Subject: ipv6: pcpu_tstats.syncp should be initialised in ip6_vti.c initialise pcpu_tstats.syncp to kill the calltrace [ 11.973950] Call Trace: [ 11.973950] [<819bbaff>] dump_stack+0x48/0x60 [ 11.973950] [<819bbaff>] dump_stack+0x48/0x60 [ 11.973950] [<81078dcf>] __lock_acquire.isra.22+0x1bf/0xc10 [ 11.973950] [<81078dcf>] __lock_acquire.isra.22+0x1bf/0xc10 [ 11.973950] [<81079fa7>] lock_acquire+0x77/0xa0 [ 11.973950] [<81079fa7>] lock_acquire+0x77/0xa0 [ 11.973950] [<817ca7ab>] ? dev_get_stats+0xcb/0x130 [ 11.973950] [<817ca7ab>] ? dev_get_stats+0xcb/0x130 [ 11.973950] [<8183862d>] ip_tunnel_get_stats64+0x6d/0x230 [ 11.973950] [<8183862d>] ip_tunnel_get_stats64+0x6d/0x230 [ 11.973950] [<817ca7ab>] ? 
dev_get_stats+0xcb/0x130 [ 11.973950] [<817ca7ab>] ? dev_get_stats+0xcb/0x130 [ 11.973950] [<811cf8c1>] ? __nla_reserve+0x21/0xd0 [ 11.973950] [<811cf8c1>] ? __nla_reserve+0x21/0xd0 [ 11.973950] [<817ca7ab>] dev_get_stats+0xcb/0x130 [ 11.973950] [<817ca7ab>] dev_get_stats+0xcb/0x130 [ 11.973950] [<817d5409>] rtnl_fill_ifinfo+0x569/0xe20 [ 11.973950] [<817d5409>] rtnl_fill_ifinfo+0x569/0xe20 [ 11.973950] [<810352e0>] ? kvm_clock_read+0x20/0x30 [ 11.973950] [<810352e0>] ? kvm_clock_read+0x20/0x30 [ 11.973950] [<81008e38>] ? sched_clock+0x8/0x10 [ 11.973950] [<81008e38>] ? sched_clock+0x8/0x10 [ 11.973950] [<8106ba45>] ? sched_clock_local+0x25/0x170 [ 11.973950] [<8106ba45>] ? sched_clock_local+0x25/0x170 [ 11.973950] [<810da6bd>] ? __kmalloc+0x3d/0x90 [ 11.973950] [<810da6bd>] ? __kmalloc+0x3d/0x90 [ 11.973950] [<817b8c10>] ? __kmalloc_reserve.isra.41+0x20/0x70 [ 11.973950] [<817b8c10>] ? __kmalloc_reserve.isra.41+0x20/0x70 [ 11.973950] [<810da81a>] ? slob_alloc_node+0x2a/0x60 [ 11.973950] [<810da81a>] ? slob_alloc_node+0x2a/0x60 [ 11.973950] [<817b919a>] ? __alloc_skb+0x6a/0x2b0 [ 11.973950] [<817b919a>] ? __alloc_skb+0x6a/0x2b0 [ 11.973950] [<817d8795>] rtmsg_ifinfo+0x65/0xe0 [ 11.973950] [<817d8795>] rtmsg_ifinfo+0x65/0xe0 [ 11.973950] [<817cbd31>] register_netdevice+0x531/0x5a0 [ 11.973950] [<817cbd31>] register_netdevice+0x531/0x5a0 [ 11.973950] [<81892b87>] ? ip6_tnl_get_cap+0x27/0x90 [ 11.973950] [<81892b87>] ? ip6_tnl_get_cap+0x27/0x90 [ 11.973950] [<817cbdb6>] register_netdev+0x16/0x30 [ 11.973950] [<817cbdb6>] register_netdev+0x16/0x30 [ 11.973950] [<81f574a6>] vti6_init_net+0x1c4/0x1d4 [ 11.973950] [<81f574a6>] vti6_init_net+0x1c4/0x1d4 [ 11.973950] [<81f573af>] ? vti6_init_net+0xcd/0x1d4 [ 11.973950] [<81f573af>] ? vti6_init_net+0xcd/0x1d4 [ 11.973950] [<817c16df>] ops_init.constprop.11+0x17f/0x1c0 [ 11.973950] [<817c16df>] ops_init.constprop.11+0x17f/0x1c0 [ 11.973950] [<817c1779>] register_pernet_operations.isra.9+0x59/0x90 [ 11.973950] [<817c1779>] register_pernet_operations.isra.9+0x59/0x90 [ 11.973950] [<817c18d1>] register_pernet_device+0x21/0x60 [ 11.973950] [<817c18d1>] register_pernet_device+0x21/0x60 [ 11.973950] [<81f574b6>] ? vti6_init_net+0x1d4/0x1d4 [ 11.973950] [<81f574b6>] ? vti6_init_net+0x1d4/0x1d4 [ 11.973950] [<81f574c7>] vti6_tunnel_init+0x11/0x68 [ 11.973950] [<81f574c7>] vti6_tunnel_init+0x11/0x68 [ 11.973950] [<81f572a1>] ? mip6_init+0x73/0xb4 [ 11.973950] [<81f572a1>] ? mip6_init+0x73/0xb4 [ 11.973950] [<81f0cba4>] do_one_initcall+0xbb/0x15b [ 11.973950] [<81f0cba4>] do_one_initcall+0xbb/0x15b [ 11.973950] [<811a00d8>] ? sha_transform+0x528/0x1150 [ 11.973950] [<811a00d8>] ? sha_transform+0x528/0x1150 [ 11.973950] [<81f0c544>] ? repair_env_string+0x12/0x51 [ 11.973950] [<81f0c544>] ? repair_env_string+0x12/0x51 [ 11.973950] [<8105c30d>] ? parse_args+0x2ad/0x440 [ 11.973950] [<8105c30d>] ? parse_args+0x2ad/0x440 [ 11.973950] [<810546be>] ? __usermodehelper_set_disable_depth+0x3e/0x50 [ 11.973950] [<810546be>] ? __usermodehelper_set_disable_depth+0x3e/0x50 [ 11.973950] [<81f0cd27>] kernel_init_freeable+0xe3/0x182 [ 11.973950] [<81f0cd27>] kernel_init_freeable+0xe3/0x182 [ 11.973950] [<81f0c532>] ? do_early_param+0x7a/0x7a [ 11.973950] [<81f0c532>] ? do_early_param+0x7a/0x7a [ 11.973950] [<819b5b1b>] kernel_init+0xb/0x100 [ 11.973950] [<819b5b1b>] kernel_init+0xb/0x100 [ 11.973950] [<819cebf7>] ret_from_kernel_thread+0x1b/0x28 [ 11.973950] [<819cebf7>] ret_from_kernel_thread+0x1b/0x28 [ 11.973950] [<819b5b10>] ? 
rest_init+0xc0/0xc0 [ 11.973950] [<819b5b10>] ? rest_init+0xc0/0xc0 Before 469bdcefdc ("ipv6: fix the use of pcpu_tstats in ip6_vti.c"), pcpu_tstats.syncp was not used to protect the 64-bit elements of pcpu_tstats, so this call trace did not appear. Reported-by: Fengguang Wu Signed-off-by: Li RongQing Signed-off-by: David S. Miller --- net/ipv6/ip6_vti.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index a4564b05c47b..7b42d5ef868d 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -732,12 +732,18 @@ static void vti6_dev_setup(struct net_device *dev) static inline int vti6_dev_init_gen(struct net_device *dev) { struct ip6_tnl *t = netdev_priv(dev); + int i; t->dev = dev; t->net = dev_net(dev); dev->tstats = alloc_percpu(struct pcpu_tstats); if (!dev->tstats) return -ENOMEM; + for_each_possible_cpu(i) { + struct pcpu_tstats *stats; + stats = per_cpu_ptr(dev->tstats, i); + u64_stats_init(&stats->syncp); + } return 0; } -- cgit v1.2.3 From 732256b9335f8456623bb772d86c2a24e3cafca2 Mon Sep 17 00:00:00 2001 From: Erik Hugne Date: Tue, 7 Jan 2014 15:51:36 -0500 Subject: tipc: correctly unlink packets from deferred packet queue When we pull a received packet from a link's 'deferred packets' queue for processing, its 'next' pointer is not cleared, and still refers to the next packet in that queue, if any. This is incorrect, but caused no harm before commit 40ba3cdf542a469aaa9083fa041656e59b109b90 ("tipc: message reassembly using fragment chain") was introduced. After that commit, it may sometimes lead to the following oops: general protection fault: 0000 [#1] SMP DEBUG_PAGEALLOC Modules linked in: tipc CPU: 4 PID: 0 Comm: swapper/4 Tainted: G W 3.13.0-rc2+ #6 Hardware name: Bochs Bochs, BIOS Bochs 01/01/2007 task: ffff880017af4880 ti: ffff880017aee000 task.ti: ffff880017aee000 RIP: 0010:[] [] skb_try_coalesce+0x44/0x3d0 RSP: 0018:ffff880016603a78 EFLAGS: 00010212 RAX: 6b6b6b6bd6d6d6d6 RBX: ffff880013106ac0 RCX: ffff880016603ad0 RDX: ffff880016603ad7 RSI: ffff88001223ed00 RDI: ffff880013106ac0 RBP: ffff880016603ab8 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000001 R11: 0000000000000000 R12: ffff88001223ed00 R13: ffff880016603ad0 R14: 000000000000058c R15: ffff880012297650 FS: 0000000000000000(0000) GS:ffff880016600000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: 000000000805b000 CR3: 0000000011f5d000 CR4: 00000000000006e0 Stack: ffff880016603a88 ffffffff810a38ed ffff880016603aa8 ffff88001223ed00 0000000000000001 ffff880012297648 ffff880016603b68 ffff880012297650 ffff880016603b08 ffffffffa0006c51 ffff880016603b08 00ffffffa00005fc Call Trace: [] ? trace_hardirqs_on+0xd/0x10 [] tipc_link_recv_fragment+0xd1/0x1b0 [tipc] [] tipc_recv_msg+0x4e4/0x920 [tipc] [] ? tipc_l2_rcv_msg+0x40/0x250 [tipc] [] tipc_l2_rcv_msg+0xcc/0x250 [tipc] [] ? tipc_l2_rcv_msg+0x40/0x250 [tipc] [] __netif_receive_skb_core+0x80b/0xd00 [] ? __netif_receive_skb_core+0x144/0xd00 [] __netif_receive_skb+0x26/0x70 [] netif_receive_skb+0x2d/0x200 [] napi_gro_receive+0xb0/0x130 [] e1000_clean_rx_irq+0x2c2/0x530 [] e1000_clean+0x266/0x9c0 [] ? notifier_call_chain+0x2b/0x160 [] net_rx_action+0x141/0x310 [] __do_softirq+0xeb/0x480 [] ? _raw_spin_unlock+0x2b/0x40 [] ?
handle_fasteoi_irq+0x72/0x100 [] irq_exit+0x96/0xc0 [] do_IRQ+0x63/0xe0 [] common_interrupt+0x6f/0x6f This happens when the last fragment of a message has passed through the receiving link's 'deferred packets' queue, and at least one other packet was added to that queue while it was there. After the fragment chain with the complete message has been successfully delivered to the receiving socket, it is released. Since the 'next' pointer of the last fragment in the released chain is now non-NULL, we get the crash shown above. We fix this by clearing the 'next' pointer of all received packets, including those being pulled from the 'deferred' queue, before they undergo any further processing. Fixes: 40ba3cdf542a4 ("tipc: message reassembly using fragment chain") Signed-off-by: Erik Hugne Reported-by: Ying Xue Reviewed-by: Paul Gortmaker Signed-off-by: Jon Maloy Signed-off-by: David S. Miller --- net/tipc/link.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/tipc/link.c b/net/tipc/link.c index 69cd9bf3f561..13b987745820 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1498,6 +1498,7 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr) int type; head = head->next; + buf->next = NULL; /* Ensure bearer is still enabled */ if (unlikely(!b_ptr->active)) -- cgit v1.2.3 From 9a2620c877454bb4b5c19f73d4d1d7b375da3632 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Tue, 7 Jan 2014 12:07:41 +0200 Subject: bnx2x: prevent WARN during driver unload Starting with commit 80c33dd "net: add might_sleep() call to napi_disable", bnx2x fails the might_sleep tests, causing a stack trace to appear whenever the driver is unloaded, as local_bh_disable() is being called before napi_disable(). This changes the locking scheme related to CONFIG_NET_RX_BUSY_POLL, removing the need to call local_bh_disable() and thus eliminating the issue. Signed-off-by: Yuval Mintz Signed-off-by: Dmitry Kravkov Signed-off-by: Ariel Elior Signed-off-by: David S.
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 44 +++++++++++++++++++------ drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 12 +++---- 2 files changed, 38 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 2d5fce4c9751..ec6119089b82 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -520,10 +520,12 @@ struct bnx2x_fastpath { #define BNX2X_FP_STATE_IDLE 0 #define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */ #define BNX2X_FP_STATE_POLL (1 << 1) /* poll owns this FP */ -#define BNX2X_FP_STATE_NAPI_YIELD (1 << 2) /* NAPI yielded this FP */ -#define BNX2X_FP_STATE_POLL_YIELD (1 << 3) /* poll yielded this FP */ +#define BNX2X_FP_STATE_DISABLED (1 << 2) +#define BNX2X_FP_STATE_NAPI_YIELD (1 << 3) /* NAPI yielded this FP */ +#define BNX2X_FP_STATE_POLL_YIELD (1 << 4) /* poll yielded this FP */ +#define BNX2X_FP_OWNED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL) #define BNX2X_FP_YIELD (BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD) -#define BNX2X_FP_LOCKED (BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL) +#define BNX2X_FP_LOCKED (BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED) #define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD) /* protect state */ spinlock_t lock; @@ -613,7 +615,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) { bool rc = true; - spin_lock(&fp->lock); + spin_lock_bh(&fp->lock); if (fp->state & BNX2X_FP_LOCKED) { WARN_ON(fp->state & BNX2X_FP_STATE_NAPI); fp->state |= BNX2X_FP_STATE_NAPI_YIELD; @@ -622,7 +624,7 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp) /* we don't care if someone yielded */ fp->state = BNX2X_FP_STATE_NAPI; } - spin_unlock(&fp->lock); + spin_unlock_bh(&fp->lock); return rc; } @@ -631,14 +633,16 @@ static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp) { bool rc = false; - spin_lock(&fp->lock); + spin_lock_bh(&fp->lock); WARN_ON(fp->state & (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD)); if (fp->state & BNX2X_FP_STATE_POLL_YIELD) rc = true; - fp->state = BNX2X_FP_STATE_IDLE; - spin_unlock(&fp->lock); + + /* state ==> idle, unless currently disabled */ + fp->state &= BNX2X_FP_STATE_DISABLED; + spin_unlock_bh(&fp->lock); return rc; } @@ -669,7 +673,9 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) if (fp->state & BNX2X_FP_STATE_POLL_YIELD) rc = true; - fp->state = BNX2X_FP_STATE_IDLE; + + /* state ==> idle, unless currently disabled */ + fp->state &= BNX2X_FP_STATE_DISABLED; spin_unlock_bh(&fp->lock); return rc; } @@ -677,9 +683,23 @@ static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp) /* true if a socket is polling, even if it did not get the lock */ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) { - WARN_ON(!(fp->state & BNX2X_FP_LOCKED)); + WARN_ON(!(fp->state & BNX2X_FP_OWNED)); return fp->state & BNX2X_FP_USER_PEND; } + +/* false if fp is currently owned */ +static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp) +{ + int rc = true; + + spin_lock_bh(&fp->lock); + if (fp->state & BNX2X_FP_OWNED) + rc = false; + fp->state |= BNX2X_FP_STATE_DISABLED; + spin_unlock_bh(&fp->lock); + + return rc; +} #else static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) { @@ -709,6 +729,10 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) { return false; } +static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp) +{ + return true; +} 
#endif /* CONFIG_NET_RX_BUSY_POLL */ /* Use 2500 as a mini-jumbo MTU for FCoE */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index ec96130533cc..c6745d7def5c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1790,26 +1790,22 @@ static void bnx2x_napi_disable_cnic(struct bnx2x *bp) { int i; - local_bh_disable(); for_each_rx_queue_cnic(bp, i) { napi_disable(&bnx2x_fp(bp, i, napi)); - while (!bnx2x_fp_lock_napi(&bp->fp[i])) - mdelay(1); + while (!bnx2x_fp_ll_disable(&bp->fp[i])) + usleep_range(1000, 2000); } - local_bh_enable(); } static void bnx2x_napi_disable(struct bnx2x *bp) { int i; - local_bh_disable(); for_each_eth_queue(bp, i) { napi_disable(&bnx2x_fp(bp, i, napi)); - while (!bnx2x_fp_lock_napi(&bp->fp[i])) - mdelay(1); + while (!bnx2x_fp_ll_disable(&bp->fp[i])) + usleep_range(1000, 2000); } - local_bh_enable(); } void bnx2x_netif_start(struct bnx2x *bp) -- cgit v1.2.3 From 07edd741c838e376933b445bbf2692f83b6774e6 Mon Sep 17 00:00:00 2001 From: Hannes Frederic Sowa Date: Wed, 8 Jan 2014 15:43:22 +0100 Subject: ipv6: add link-local, sit and loopback address with INFINITY_LIFE_TIME MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In the past, the IFA_PERMANENT flag indicated that the valid and preferred lifetimes were ignored. Since change fad8da3e085ddf ("ipv6 addrconf: fix preferred lifetime state-changing behavior while valid_lft is infinity") we honour at least the preferred lifetime on those addresses. As such, the valid lifetime gets recalculated and updated to 0. If the loopback address is added manually, this problem does not occur. Also, if NetworkManager manages IPv6, those addresses will get added via inet6_rtm_newaddr and thus will have a correct lifetime, too. Reported-by: François-Xavier Le Bail Reported-by: Damien Wyart Fixes: fad8da3e085ddf ("ipv6 addrconf: fix preferred lifetime state-changing behavior while valid_lft is infinity") Cc: Yasushi Asano Signed-off-by: Hannes Frederic Sowa Signed-off-by: David S.
Miller --- net/ipv6/addrconf.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index f62c72b59f8e..abe46a4228ce 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -2509,7 +2509,8 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr, struct inet6_ifaddr *ifp; ifp = ipv6_add_addr(idev, addr, NULL, plen, - scope, IFA_F_PERMANENT, 0, 0); + scope, IFA_F_PERMANENT, + INFINITY_LIFE_TIME, INFINITY_LIFE_TIME); if (!IS_ERR(ifp)) { spin_lock_bh(&ifp->lock); ifp->flags &= ~IFA_F_TENTATIVE; @@ -2637,7 +2638,8 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr #endif - ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags, 0, 0); + ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags, + INFINITY_LIFE_TIME, INFINITY_LIFE_TIME); if (!IS_ERR(ifp)) { addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0); addrconf_dad_start(ifp); -- cgit v1.2.3 From 95e92fd40c967c363ad66b2fd1ce4dcd68132e54 Mon Sep 17 00:00:00 2001 From: Michal Schmidt Date: Thu, 9 Jan 2014 14:36:27 +0100 Subject: bnx2x: fix DMA unmapping of TSO split BDs bnx2x triggers warnings with CONFIG_DMA_API_DEBUG=y: WARNING: CPU: 0 PID: 2253 at lib/dma-debug.c:887 check_unmap+0xf8/0x920() bnx2x 0000:28:00.0: DMA-API: device driver frees DMA memory with different size [device address=0x00000000da2b389e] [map size=1490 bytes] [unmap size=66 bytes] The reason is that bnx2x splits a TSO BD into two BDs (headers + data) using one DMA mapping for both, but it uses only the length of the first BD when unmapping. This patch fixes the bug by unmapping the whole length of the two BDs. Signed-off-by: Michal Schmidt Reviewed-by: Eric Dumazet Acked-by: Dmitry Kravkov Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index c6745d7def5c..550412088dd0 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -160,6 +160,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, struct sk_buff *skb = tx_buf->skb; u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; int nbd; + u16 split_bd_len = 0; /* prefetch skb end pointer to speedup dev_kfree_skb() */ prefetch(&skb->end); @@ -167,10 +168,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", txdata->txq_index, idx, tx_buf, skb); - /* unmap first bd */ tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd; - dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), - BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE); nbd = le16_to_cpu(tx_start_bd->nbd) - 1; #ifdef BNX2X_STOP_ON_ERROR @@ -188,12 +186,19 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, --nbd; bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); - /* ...and the TSO split header bd since they have no mapping */ + /* TSO headers+data bds share a common mapping. 
See bnx2x_tx_split() */ if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { + tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; + split_bd_len = BD_UNMAP_LEN(tx_data_bd); --nbd; bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); } + /* unmap first bd */ + dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), + BD_UNMAP_LEN(tx_start_bd) + split_bd_len, + DMA_TO_DEVICE); + /* now free frags */ while (nbd > 0) { -- cgit v1.2.3 From b13ba1b83f524732523db1079e56478b32c85c96 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Fri, 10 Jan 2014 16:18:25 +0800 Subject: macvlan: forbid L2 forwarding offload for macvtap L2 forwarding offload will bypass the rx handler of the real device. This means the packet cannot be forwarded to the macvtap device. Another problem is that the dev_hard_start_xmit() called for macvtap does not have any synchronization. Fix this by forbidding L2 forwarding for macvtap. Cc: John Fastabend Cc: Neil Horman Acked-by: Neil Horman Signed-off-by: Jason Wang Acked-by: John Fastabend Signed-off-by: David S. Miller --- drivers/net/macvlan.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 60406b01f9eb..5360f73c9817 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -338,6 +338,8 @@ static const struct header_ops macvlan_hard_header_ops = { .cache_update = eth_header_cache_update, }; +static struct rtnl_link_ops macvlan_link_ops; + static int macvlan_open(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); @@ -353,7 +355,8 @@ static int macvlan_open(struct net_device *dev) goto hash_add; } - if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) { + if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD && + dev->rtnl_link_ops == &macvlan_link_ops) { vlan->fwd_priv = lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev); -- cgit v1.2.3 From f663dd9aaf9ed124f25f0f8452edf238f087ad50 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Fri, 10 Jan 2014 16:18:26 +0800 Subject: net: core: explicitly select a txq before doing l2 forwarding Currently, the tx queue is selected implicitly in ndo_dfwd_start_xmit(). This causes several issues: - NETIF_F_LLTX was removed for macvlan, so the txq lock is taken for macvlan instead of the lower device, which misses the necessary txq synchronization for the lower device, such as txq stopping or freezing required by the dev watchdog or control path. - dev_hard_start_xmit() is called with a NULL txq, which bypasses the net device watchdog. - dev_hard_start_xmit() does not check txq everywhere, which can lead to a crash when TSO is disabled for the lower device. Fix this by explicitly introducing a new parameter for .ndo_select_queue() used just for selecting queues in the case of l2 forwarding offload. netdev_pick_tx() is also extended to accept this parameter, and dev_queue_xmit_accel() is used to do the l2 forwarding transmission. With these fixes, NETIF_F_LLTX can be preserved for macvlan and there's no need to check txq against NULL in dev_hard_start_xmit(). Also there's no need to keep a dedicated ndo_dfwd_start_xmit(); we can just reuse the code of dev_queue_xmit() to do the transmission. In the future, this will also be required for macvtap l2 forwarding support since it provides a necessary synchronization method. Cc: John Fastabend Cc: Neil Horman Cc: e1000-devel@lists.sourceforge.net Signed-off-by: Jason Wang Acked-by: Neil Horman Acked-by: John Fastabend Signed-off-by: David S.
Miller --- drivers/net/bonding/bond_main.c | 3 ++- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 3 ++- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 3 ++- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 33 ++++++++++--------------- drivers/net/ethernet/lantiq_etop.c | 3 ++- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 3 ++- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 3 ++- drivers/net/ethernet/tile/tilegx.c | 3 ++- drivers/net/macvlan.c | 9 +++---- drivers/net/team/team.c | 3 ++- drivers/net/tun.c | 3 ++- drivers/net/wireless/mwifiex/main.c | 3 ++- drivers/staging/bcm/Bcmnet.c | 3 ++- drivers/staging/netlogic/xlr_net.c | 3 ++- drivers/staging/rtl8188eu/os_dep/os_intfs.c | 3 ++- include/linux/netdevice.h | 12 ++++++--- net/core/dev.c | 29 +++++++++++++--------- net/core/flow_dissector.c | 10 +++++--- net/core/netpoll.c | 2 +- net/mac80211/iface.c | 6 +++-- net/sched/sch_generic.c | 2 +- 21 files changed, 80 insertions(+), 62 deletions(-) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 398e299ee1bd..4b8c58b0ec24 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3732,7 +3732,8 @@ static inline int bond_slave_override(struct bonding *bond, } -static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb) +static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv) { /* * This helper function exists to help dev_pick_tx get the correct diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 550412088dd0..bf811565ee24 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1833,7 +1833,8 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw) bnx2x_napi_disable_cnic(bp); } -u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb) +u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv) { struct bnx2x *bp = netdev_priv(dev); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index da8fcaa74495..41f3ca5ad972 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -524,7 +524,8 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac); int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos); /* select_queue callback */ -u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb); +u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv); static inline void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index cc06854296a3..5bcc870f8367 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6827,12 +6827,20 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) return __ixgbe_maybe_stop_tx(tx_ring, size); } -#ifdef IXGBE_FCOE -static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) +static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv) { + struct ixgbe_fwd_adapter *fwd_adapter = accel_priv; +#ifdef IXGBE_FCOE struct ixgbe_adapter *adapter; struct ixgbe_ring_feature *f; int txq; +#endif + + if (fwd_adapter) + return skb->queue_mapping + 
fwd_adapter->tx_base_queue; + +#ifdef IXGBE_FCOE /* * only execute the code below if protocol is FCoE @@ -6858,9 +6866,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) txq -= f->indices; return txq + f->offset; +#else + return __netdev_pick_tx(dev, skb); +#endif } -#endif netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring) @@ -7629,27 +7639,11 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv) kfree(fwd_adapter); } -static netdev_tx_t ixgbe_fwd_xmit(struct sk_buff *skb, - struct net_device *dev, - void *priv) -{ - struct ixgbe_fwd_adapter *fwd_adapter = priv; - unsigned int queue; - struct ixgbe_ring *tx_ring; - - queue = skb->queue_mapping + fwd_adapter->tx_base_queue; - tx_ring = fwd_adapter->real_adapter->tx_ring[queue]; - - return __ixgbe_xmit_frame(skb, dev, tx_ring); -} - static const struct net_device_ops ixgbe_netdev_ops = { .ndo_open = ixgbe_open, .ndo_stop = ixgbe_close, .ndo_start_xmit = ixgbe_xmit_frame, -#ifdef IXGBE_FCOE .ndo_select_queue = ixgbe_select_queue, -#endif .ndo_set_rx_mode = ixgbe_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = ixgbe_set_mac, @@ -7689,7 +7683,6 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, .ndo_dfwd_add_station = ixgbe_fwd_add, .ndo_dfwd_del_station = ixgbe_fwd_del, - .ndo_dfwd_start_xmit = ixgbe_fwd_xmit, }; /** diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 6a6c1f76d8e0..ec94a20d7099 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -619,7 +619,8 @@ ltq_etop_set_multicast_list(struct net_device *dev) } static u16 -ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb) +ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv) { /* we are currently only using the first queue */ return 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index f54ebd5a1702..a7fcd593b2db 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -592,7 +592,8 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk } } -u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb) +u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv) { struct mlx4_en_priv *priv = netdev_priv(dev); u16 rings_p_up = priv->num_tx_rings_p_up; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index f3758de59c05..d5758adceaa2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -714,7 +714,8 @@ int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); void mlx4_en_tx_irq(struct mlx4_cq *mcq); -u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb); +u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv); netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index 628b736e5ae7..0e9fb3301b11 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -2080,7 +2080,8 @@ 
static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) } /* Return subqueue id on this core (one per core). */ -static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb) +static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv) { return smp_processor_id(); } diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 5360f73c9817..bc8faaec33f5 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -299,7 +299,7 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, if (vlan->fwd_priv) { skb->dev = vlan->lowerdev; - ret = dev_hard_start_xmit(skb, skb->dev, NULL, vlan->fwd_priv); + ret = dev_queue_xmit_accel(skb, vlan->fwd_priv); } else { ret = macvlan_queue_xmit(skb, dev); } @@ -365,10 +365,8 @@ static int macvlan_open(struct net_device *dev) */ if (IS_ERR_OR_NULL(vlan->fwd_priv)) { vlan->fwd_priv = NULL; - } else { - dev->features &= ~NETIF_F_LLTX; + } else return 0; - } } err = -EBUSY; @@ -702,8 +700,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev, features = netdev_increment_features(vlan->lowerdev->features, features, mask); - if (!vlan->fwd_priv) - features |= NETIF_F_LLTX; + features |= NETIF_F_LLTX; return features; } diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 736050d6b451..b75ae5bde673 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1647,7 +1647,8 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } -static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb) +static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv) { /* * This helper function exists to help dev_pick_tx get the correct diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 7c8343a4f918..ecec8029c5e8 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -348,7 +348,8 @@ unlock: * different rxq no. here. If we could not get rxhash, then we would * hope the rxq no. may help here. 
*/ -static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb) +static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv) { struct tun_struct *tun = netdev_priv(dev); struct tun_flow_entry *e; diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c index 78e8a6666cc6..8bb8988c435c 100644 --- a/drivers/net/wireless/mwifiex/main.c +++ b/drivers/net/wireless/mwifiex/main.c @@ -746,7 +746,8 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev) } static u16 -mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb) +mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv) { skb->priority = cfg80211_classify8021d(skb); return mwifiex_1d_to_wmm_queue[skb->priority]; diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c index 53fee2f9a498..8dfdd2732bdc 100644 --- a/drivers/staging/bcm/Bcmnet.c +++ b/drivers/staging/bcm/Bcmnet.c @@ -39,7 +39,8 @@ static INT bcm_close(struct net_device *dev) return 0; } -static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb) +static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv) { return ClassifyPacket(netdev_priv(dev), skb); } diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c index 235d2b1ec593..eedffed17e39 100644 --- a/drivers/staging/netlogic/xlr_net.c +++ b/drivers/staging/netlogic/xlr_net.c @@ -306,7 +306,8 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } -static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb) +static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb, + void *accel_priv) { return (u16)smp_processor_id(); } diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c index 17659bb04bef..dd69e344e409 100644 --- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c +++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c @@ -652,7 +652,8 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb) return dscp >> 5; } -static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb) +static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv) { struct adapter *padapter = rtw_netdev_priv(dev); struct mlme_priv *pmlmepriv = &padapter->mlmepriv; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 5faaadb0c74f..ce2a1f5f9a1e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -769,7 +769,8 @@ struct netdev_phys_port_id { * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) * Required can not be NULL. * - * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb); + * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb, + * void *accel_priv); * Called to decide which queue to when device supports multiple * transmit queues. 
* @@ -990,7 +991,8 @@ struct net_device_ops { netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb, struct net_device *dev); u16 (*ndo_select_queue)(struct net_device *dev, - struct sk_buff *skb); + struct sk_buff *skb, + void *accel_priv); void (*ndo_change_rx_flags)(struct net_device *dev, int flags); void (*ndo_set_rx_mode)(struct net_device *dev); @@ -1529,7 +1531,8 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev, } struct netdev_queue *netdev_pick_tx(struct net_device *dev, - struct sk_buff *skb); + struct sk_buff *skb, + void *accel_priv); u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); /* @@ -1819,6 +1822,7 @@ int dev_close(struct net_device *dev); void dev_disable_lro(struct net_device *dev); int dev_loopback_xmit(struct sk_buff *newskb); int dev_queue_xmit(struct sk_buff *skb); +int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv); int register_netdevice(struct net_device *dev); void unregister_netdevice_queue(struct net_device *dev, struct list_head *head); void unregister_netdevice_many(struct list_head *head); @@ -2426,7 +2430,7 @@ int dev_change_carrier(struct net_device *, bool new_carrier); int dev_get_phys_port_id(struct net_device *dev, struct netdev_phys_port_id *ppid); int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, - struct netdev_queue *txq, void *accel_priv); + struct netdev_queue *txq); int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); extern int netdev_budget; diff --git a/net/core/dev.c b/net/core/dev.c index 4fc17221545d..0ce469e5ec80 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2539,7 +2539,7 @@ static inline int skb_needs_linearize(struct sk_buff *skb, } int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, - struct netdev_queue *txq, void *accel_priv) + struct netdev_queue *txq) { const struct net_device_ops *ops = dev->netdev_ops; int rc = NETDEV_TX_OK; @@ -2605,13 +2605,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, dev_queue_xmit_nit(skb, dev); skb_len = skb->len; - if (accel_priv) - rc = ops->ndo_dfwd_start_xmit(skb, dev, accel_priv); - else rc = ops->ndo_start_xmit(skb, dev); trace_net_dev_xmit(skb, rc, dev, skb_len); - if (rc == NETDEV_TX_OK && txq) + if (rc == NETDEV_TX_OK) txq_trans_update(txq); return rc; } @@ -2627,10 +2624,7 @@ gso: dev_queue_xmit_nit(nskb, dev); skb_len = nskb->len; - if (accel_priv) - rc = ops->ndo_dfwd_start_xmit(nskb, dev, accel_priv); - else - rc = ops->ndo_start_xmit(nskb, dev); + rc = ops->ndo_start_xmit(nskb, dev); trace_net_dev_xmit(nskb, rc, dev, skb_len); if (unlikely(rc != NETDEV_TX_OK)) { if (rc & ~NETDEV_TX_MASK) @@ -2811,7 +2805,7 @@ EXPORT_SYMBOL(dev_loopback_xmit); * the BH enable code must have IRQs enabled so that it will not deadlock. 
* --BLG */ -int dev_queue_xmit(struct sk_buff *skb) +int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) { struct net_device *dev = skb->dev; struct netdev_queue *txq; @@ -2827,7 +2821,7 @@ int dev_queue_xmit(struct sk_buff *skb) skb_update_prio(skb); - txq = netdev_pick_tx(dev, skb); + txq = netdev_pick_tx(dev, skb, accel_priv); q = rcu_dereference_bh(txq->qdisc); #ifdef CONFIG_NET_CLS_ACT @@ -2863,7 +2857,7 @@ int dev_queue_xmit(struct sk_buff *skb) if (!netif_xmit_stopped(txq)) { __this_cpu_inc(xmit_recursion); - rc = dev_hard_start_xmit(skb, dev, txq, NULL); + rc = dev_hard_start_xmit(skb, dev, txq); __this_cpu_dec(xmit_recursion); if (dev_xmit_complete(rc)) { HARD_TX_UNLOCK(dev, txq); @@ -2892,8 +2886,19 @@ out: rcu_read_unlock_bh(); return rc; } + +int dev_queue_xmit(struct sk_buff *skb) +{ + return __dev_queue_xmit(skb, NULL); +} EXPORT_SYMBOL(dev_queue_xmit); +int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv) +{ + return __dev_queue_xmit(skb, accel_priv); +} +EXPORT_SYMBOL(dev_queue_xmit_accel); + /*======================================================================= Receiver routines diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index d6ef17322500..2fc5beaf5783 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -395,17 +395,21 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) EXPORT_SYMBOL(__netdev_pick_tx); struct netdev_queue *netdev_pick_tx(struct net_device *dev, - struct sk_buff *skb) + struct sk_buff *skb, + void *accel_priv) { int queue_index = 0; if (dev->real_num_tx_queues != 1) { const struct net_device_ops *ops = dev->netdev_ops; if (ops->ndo_select_queue) - queue_index = ops->ndo_select_queue(dev, skb); + queue_index = ops->ndo_select_queue(dev, skb, + accel_priv); else queue_index = __netdev_pick_tx(dev, skb); - queue_index = dev_cap_txqueue(dev, queue_index); + + if (!accel_priv) + queue_index = dev_cap_txqueue(dev, queue_index); } skb_set_queue_mapping(skb, queue_index); diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 303097874633..19fe9c717ced 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -375,7 +375,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) { struct netdev_queue *txq; - txq = netdev_pick_tx(dev, skb); + txq = netdev_pick_tx(dev, skb, NULL); /* try until next clock tick */ for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 36c3a4cbcabf..a0757913046e 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1061,7 +1061,8 @@ static void ieee80211_uninit(struct net_device *dev) } static u16 ieee80211_netdev_select_queue(struct net_device *dev, - struct sk_buff *skb) + struct sk_buff *skb, + void *accel_priv) { return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); } @@ -1078,7 +1079,8 @@ static const struct net_device_ops ieee80211_dataif_ops = { }; static u16 ieee80211_monitor_select_queue(struct net_device *dev, - struct sk_buff *skb) + struct sk_buff *skb, + void *accel_priv) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 922a09406ba7..7fc899a943a8 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -126,7 +126,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, HARD_TX_LOCK(dev, txq, smp_processor_id()); if 
(!netif_xmit_frozen_or_stopped(txq)) - ret = dev_hard_start_xmit(skb, dev, txq, NULL); + ret = dev_hard_start_xmit(skb, dev, txq); HARD_TX_UNLOCK(dev, txq); -- cgit v1.2.3 From 1ac6762a0b8f5f2dc35ea869a08da25c68d7a8ba Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Thu, 9 Jan 2014 12:41:04 -0500 Subject: qlcnic: Fix bug in TX statistics o Driver was not updating TX stats, so statistics were not populated in `ifconfig` command output. Signed-off-by: Manish Chopra Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 1 + drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 2 +- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 3 +++ 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index ff80cd8f6d2b..f2a7c7166e24 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -1711,6 +1711,7 @@ int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *); void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *); void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx); void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx); +void qlcnic_update_stats(struct qlcnic_adapter *); /* Adapter hardware abstraction */ struct qlcnic_hardware_ops { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index e3be2760665c..7b36340a1b34 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -1267,7 +1267,7 @@ static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type) return data; } -static void qlcnic_update_stats(struct qlcnic_adapter *adapter) +void qlcnic_update_stats(struct qlcnic_adapter *adapter) { struct qlcnic_host_tx_ring *tx_ring; int ring; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index b8a245a79de3..550791b8fbae 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2780,6 +2780,9 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) struct qlcnic_adapter *adapter = netdev_priv(netdev); struct net_device_stats *stats = &netdev->stats; + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) + qlcnic_update_stats(adapter); + stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; stats->tx_packets = adapter->stats.xmitfinished; stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes; -- cgit v1.2.3 From d6e9c89a8d3cf0a5184badbcd50169179af27721 Mon Sep 17 00:00:00 2001 From: Shahed Shaikh Date: Thu, 9 Jan 2014 12:41:05 -0500 Subject: qlcnic: Fix ethtool statistics length calculation o Consider the number of Tx queues while calculating the length of Tx statistics as part of ethtool stats. o Calculate statistics length properly for 82xx and 83xx adapters Signed-off-by: Shahed Shaikh Signed-off-by: David S.
Miller --- .../net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 39 ++++++++++++---------- 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 7b36340a1b34..6b08194aa0d4 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -167,27 +167,35 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = { #define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test) -static inline int qlcnic_82xx_statistics(void) +static inline int qlcnic_82xx_statistics(struct qlcnic_adapter *adapter) { - return ARRAY_SIZE(qlcnic_device_gstrings_stats) + - ARRAY_SIZE(qlcnic_83xx_mac_stats_strings); + return ARRAY_SIZE(qlcnic_gstrings_stats) + + ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) + + QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings; } -static inline int qlcnic_83xx_statistics(void) +static inline int qlcnic_83xx_statistics(struct qlcnic_adapter *adapter) { - return ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) + + return ARRAY_SIZE(qlcnic_gstrings_stats) + + ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) + ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) + - ARRAY_SIZE(qlcnic_83xx_rx_stats_strings); + ARRAY_SIZE(qlcnic_83xx_rx_stats_strings) + + QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings; } static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter) { - if (qlcnic_82xx_check(adapter)) - return qlcnic_82xx_statistics(); - else if (qlcnic_83xx_check(adapter)) - return qlcnic_83xx_statistics(); - else - return -1; + int len = -1; + + if (qlcnic_82xx_check(adapter)) { + len = qlcnic_82xx_statistics(adapter); + if (adapter->flags & QLCNIC_ESWITCH_ENABLED) + len += ARRAY_SIZE(qlcnic_device_gstrings_stats); + } else if (qlcnic_83xx_check(adapter)) { + len = qlcnic_83xx_statistics(adapter); + } + + return len; } #define QLCNIC_TX_INTR_NOT_CONFIGURED 0X78563412 @@ -920,18 +928,13 @@ static int qlcnic_eeprom_test(struct net_device *dev) static int qlcnic_get_sset_count(struct net_device *dev, int sset) { - int len; struct qlcnic_adapter *adapter = netdev_priv(dev); switch (sset) { case ETH_SS_TEST: return QLCNIC_TEST_LEN; case ETH_SS_STATS: - len = qlcnic_dev_statistics_len(adapter) + QLCNIC_STATS_LEN; - if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) || - qlcnic_83xx_check(adapter)) - return len; - return qlcnic_82xx_statistics(); + return qlcnic_dev_statistics_len(adapter); default: return -EOPNOTSUPP; } -- cgit v1.2.3
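As a reading aid, and not part of the patch series above: the macvlan and net-core patches in this set change the .ndo_select_queue() callback to take an extra accel_priv argument and route L2-forwarding traffic through dev_queue_xmit_accel() instead of a dedicated ndo_dfwd_start_xmit(). Below is a minimal sketch of how a multiqueue driver might implement the new callback shape; the foo_* names and the forwarding-context layout are hypothetical and only illustrate the API after this series, mirroring what the ixgbe hunk above does with its fwd_adapter.

/* Illustrative sketch only -- not a patch from the series above. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct foo_fwd_ctx {
	u16 tx_base_queue;	/* first hardware queue reserved for this station (hypothetical) */
};

static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
			    void *accel_priv)
{
	/* When the stack transmits via dev_queue_xmit_accel(), accel_priv
	 * carries the context returned by .ndo_dfwd_add_station(); map the
	 * skb onto that station's private queue range.
	 */
	if (accel_priv) {
		struct foo_fwd_ctx *ctx = accel_priv;

		return skb->queue_mapping + ctx->tx_base_queue;
	}

	/* Normal traffic falls back to the core's default queue selection. */
	return __netdev_pick_tx(dev, skb);
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_select_queue	= foo_select_queue,
	/* .ndo_open, .ndo_start_xmit, .ndo_dfwd_add_station, ... omitted */
};

On the caller side, the macvlan hunk above shows the matching pattern: instead of calling dev_hard_start_xmit() with a NULL txq, the upper device hands the skb plus its forwarding context to dev_queue_xmit_accel(skb, vlan->fwd_priv), so the txq is selected and locked exactly as for any other transmission.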