author     Linus Torvalds <torvalds@linux-foundation.org>   2018-01-31 14:31:10 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2018-01-31 14:31:10 -0800
commit     b2fe5fa68642860e7de76167c3111623aa0d5de1 (patch)
tree       b7f9b89b7039ecefbc35fe3c8e73a6ff972641dd /drivers/net/ethernet/nvidia/forcedeth.c
parent     a103950e0dd2058df5e8a8d4a915707bdcf205f0 (diff)
parent     a54667f6728c2714a400f3c884727da74b6d1717 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
1) Significantly shrink the core networking routing structures. Result
of http://vger.kernel.org/~davem/seoul2017_netdev_keynote.pdf
2) Add netdevsim driver for testing various offloads, from Jakub
Kicinski.
3) Support cross-chip FDB operations in DSA, from Vivien Didelot.
4) Add a 2nd listener hash table for TCP, similar to what was done for
UDP. From Martin KaFai Lau.
5) Add eBPF based queue selection to tun, from Jason Wang.
6) Lockless qdisc support, from John Fastabend.
7) SCTP stream interleave support, from Xin Long.
8) Smoother TCP receive autotuning, from Eric Dumazet.
9) Lots of erspan tunneling enhancements, from William Tu.
10) Add true function call support to BPF, from Alexei Starovoitov.
11) Add explicit support for GRO HW offloading, from Michael Chan.
12) Support extack generation in more netlink subsystems. From Alexander
Aring, Quentin Monnet, and Jakub Kicinski.
13) Add 1000BaseX, flow control, and EEE support to mvneta driver. From
Russell King.
14) Add flow table abstraction to netfilter, from Pablo Neira Ayuso.
15) Many improvements and simplifications to the NFP driver bpf JIT,
from Jakub Kicinski.
16) Support for ipv6 non-equal cost multipath routing, from Ido
Schimmel.
17) Add resource abstraction to devlink, from Arkadi Sharshevsky.
18) Packet scheduler classifier shared filter block support, from Jiri
Pirko.
19) Avoid locking in act_csum, from Davide Caratti.
20) devinet_ioctl() simplifications from Al Viro.
21) More TCP bpf improvements from Lawrence Brakmo.
22) Add support for onlink ipv6 route flag, similar to ipv4, from David
Ahern.
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1925 commits)
tls: Add support for encryption using async offload accelerator
ip6mr: fix stale iterator
net/sched: kconfig: Remove blank help texts
openvswitch: meter: Use 64-bit arithmetic instead of 32-bit
tcp_nv: fix potential integer overflow in tcpnv_acked
r8169: fix RTL8168EP take too long to complete driver initialization.
qmi_wwan: Add support for Quectel EP06
rtnetlink: enable IFLA_IF_NETNSID for RTM_NEWLINK
ipmr: Fix ptrdiff_t print formatting
ibmvnic: Wait for device response when changing MAC
qlcnic: fix deadlock bug
tcp: release sk_frag.page in tcp_disconnect
ipv4: Get the address of interface correctly.
net_sched: gen_estimator: fix lockdep splat
net: macb: Handle HRESP error
net/mlx5e: IPoIB, Fix copy-paste bug in flow steering refactoring
ipv6: addrconf: break critical section in addrconf_verify_rtnl()
ipv6: change route cache aging logic
i40e/i40evf: Update DESC_NEEDED value to reflect larger value
bnxt_en: cleanup DIM work on device shutdown
...
Diffstat (limited to 'drivers/net/ethernet/nvidia/forcedeth.c')
-rw-r--r--   drivers/net/ethernet/nvidia/forcedeth.c | 94
1 file changed, 48 insertions, 46 deletions
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 481876b5424c..66c665d0b926 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -793,9 +793,9 @@ struct fe_priv {
 	/* rx specific fields.
 	 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
 	 */
-	union ring_type get_rx, put_rx, first_rx, last_rx;
+	union ring_type get_rx, put_rx, last_rx;
 	struct nv_skb_map *get_rx_ctx, *put_rx_ctx;
-	struct nv_skb_map *first_rx_ctx, *last_rx_ctx;
+	struct nv_skb_map *last_rx_ctx;
 	struct nv_skb_map *rx_skb;
 
 	union ring_type rx_ring;
@@ -822,9 +822,9 @@ struct fe_priv {
 	/*
 	 * tx specific fields.
 	 */
-	union ring_type get_tx, put_tx, first_tx, last_tx;
+	union ring_type get_tx, put_tx, last_tx;
 	struct nv_skb_map *get_tx_ctx, *put_tx_ctx;
-	struct nv_skb_map *first_tx_ctx, *last_tx_ctx;
+	struct nv_skb_map *last_tx_ctx;
 	struct nv_skb_map *tx_skb;
 
 	union ring_type tx_ring;
@@ -1812,12 +1812,12 @@ static int nv_alloc_rx(struct net_device *dev)
 	struct ring_desc *less_rx;
 
 	less_rx = np->get_rx.orig;
-	if (less_rx-- == np->first_rx.orig)
+	if (less_rx-- == np->rx_ring.orig)
 		less_rx = np->last_rx.orig;
 
 	while (np->put_rx.orig != less_rx) {
 		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
-		if (skb) {
+		if (likely(skb)) {
 			np->put_rx_ctx->skb = skb;
 			np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
 							     skb->data,
@@ -1833,9 +1833,9 @@ static int nv_alloc_rx(struct net_device *dev)
 			wmb();
 			np->put_rx.orig->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
 			if (unlikely(np->put_rx.orig++ == np->last_rx.orig))
-				np->put_rx.orig = np->first_rx.orig;
+				np->put_rx.orig = np->rx_ring.orig;
 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
-				np->put_rx_ctx = np->first_rx_ctx;
+				np->put_rx_ctx = np->rx_skb;
 		} else {
 packet_dropped:
 			u64_stats_update_begin(&np->swstats_rx_syncp);
@@ -1853,12 +1853,12 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 	struct ring_desc_ex *less_rx;
 
 	less_rx = np->get_rx.ex;
-	if (less_rx-- == np->first_rx.ex)
+	if (less_rx-- == np->rx_ring.ex)
 		less_rx = np->last_rx.ex;
 
 	while (np->put_rx.ex != less_rx) {
 		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + NV_RX_ALLOC_PAD);
-		if (skb) {
+		if (likely(skb)) {
 			np->put_rx_ctx->skb = skb;
 			np->put_rx_ctx->dma = dma_map_single(&np->pci_dev->dev,
 							     skb->data,
@@ -1875,9 +1875,9 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 			wmb();
 			np->put_rx.ex->flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
 			if (unlikely(np->put_rx.ex++ == np->last_rx.ex))
-				np->put_rx.ex = np->first_rx.ex;
+				np->put_rx.ex = np->rx_ring.ex;
 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
-				np->put_rx_ctx = np->first_rx_ctx;
+				np->put_rx_ctx = np->rx_skb;
 		} else {
 packet_dropped:
 			u64_stats_update_begin(&np->swstats_rx_syncp);
@@ -1903,13 +1903,15 @@ static void nv_init_rx(struct net_device *dev)
 	struct fe_priv *np = netdev_priv(dev);
 	int i;
 
-	np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
+	np->get_rx = np->rx_ring;
+	np->put_rx = np->rx_ring;
 
 	if (!nv_optimized(np))
 		np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
 	else
 		np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
-	np->get_rx_ctx = np->put_rx_ctx = np->first_rx_ctx = np->rx_skb;
+	np->get_rx_ctx = np->rx_skb;
+	np->put_rx_ctx = np->rx_skb;
 	np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
 
 	for (i = 0; i < np->rx_ring_size; i++) {
@@ -1932,13 +1934,15 @@ static void nv_init_tx(struct net_device *dev)
 	struct fe_priv *np = netdev_priv(dev);
 	int i;
 
-	np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
+	np->get_tx = np->tx_ring;
+	np->put_tx = np->tx_ring;
 
 	if (!nv_optimized(np))
 		np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
 	else
 		np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
-	np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
+	np->get_tx_ctx = np->tx_skb;
+	np->put_tx_ctx = np->tx_skb;
 	np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
 	netdev_reset_queue(np->dev);
 	np->tx_pkts_in_progress = 0;
@@ -2248,9 +2252,9 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		offset += bcnt;
 		size -= bcnt;
 		if (unlikely(put_tx++ == np->last_tx.orig))
-			put_tx = np->first_tx.orig;
+			put_tx = np->tx_ring.orig;
 		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
-			np->put_tx_ctx = np->first_tx_ctx;
+			np->put_tx_ctx = np->tx_skb;
 	} while (size);
 
 	/* setup the fragments */
@@ -2276,7 +2280,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 				do {
 					nv_unmap_txskb(np, start_tx_ctx);
 					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
-						tmp_tx_ctx = np->first_tx_ctx;
+						tmp_tx_ctx = np->tx_skb;
 				} while (tmp_tx_ctx != np->put_tx_ctx);
 				dev_kfree_skb_any(skb);
 				np->put_tx_ctx = start_tx_ctx;
@@ -2294,18 +2298,18 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			offset += bcnt;
 			frag_size -= bcnt;
 			if (unlikely(put_tx++ == np->last_tx.orig))
-				put_tx = np->first_tx.orig;
+				put_tx = np->tx_ring.orig;
 			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
-				np->put_tx_ctx = np->first_tx_ctx;
+				np->put_tx_ctx = np->tx_skb;
 		} while (frag_size);
 	}
 
-	if (unlikely(put_tx == np->first_tx.orig))
+	if (unlikely(put_tx == np->tx_ring.orig))
 		prev_tx = np->last_tx.orig;
 	else
 		prev_tx = put_tx - 1;
 
-	if (unlikely(np->put_tx_ctx == np->first_tx_ctx))
+	if (unlikely(np->put_tx_ctx == np->tx_skb))
 		prev_tx_ctx = np->last_tx_ctx;
 	else
 		prev_tx_ctx = np->put_tx_ctx - 1;
@@ -2406,9 +2410,9 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 		offset += bcnt;
 		size -= bcnt;
 		if (unlikely(put_tx++ == np->last_tx.ex))
-			put_tx = np->first_tx.ex;
+			put_tx = np->tx_ring.ex;
 		if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
-			np->put_tx_ctx = np->first_tx_ctx;
+			np->put_tx_ctx = np->tx_skb;
 	} while (size);
 
 	/* setup the fragments */
@@ -2434,7 +2438,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 				do {
 					nv_unmap_txskb(np, start_tx_ctx);
 					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
-						tmp_tx_ctx = np->first_tx_ctx;
+						tmp_tx_ctx = np->tx_skb;
 				} while (tmp_tx_ctx != np->put_tx_ctx);
 				dev_kfree_skb_any(skb);
 				np->put_tx_ctx = start_tx_ctx;
@@ -2452,18 +2456,18 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 			offset += bcnt;
 			frag_size -= bcnt;
 			if (unlikely(put_tx++ == np->last_tx.ex))
-				put_tx = np->first_tx.ex;
+				put_tx = np->tx_ring.ex;
 			if (unlikely(np->put_tx_ctx++ == np->last_tx_ctx))
-				np->put_tx_ctx = np->first_tx_ctx;
+				np->put_tx_ctx = np->tx_skb;
 		} while (frag_size);
 	}
 
-	if (unlikely(put_tx == np->first_tx.ex))
+	if (unlikely(put_tx == np->tx_ring.ex))
 		prev_tx = np->last_tx.ex;
 	else
 		prev_tx = put_tx - 1;
 
-	if (unlikely(np->put_tx_ctx == np->first_tx_ctx))
+	if (unlikely(np->put_tx_ctx == np->tx_skb))
 		prev_tx_ctx = np->last_tx_ctx;
 	else
 		prev_tx_ctx = np->put_tx_ctx - 1;
@@ -2563,7 +2567,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
 
 		if (np->desc_ver == DESC_VER_1) {
 			if (flags & NV_TX_LASTPACKET) {
-				if (flags & NV_TX_ERROR) {
+				if (unlikely(flags & NV_TX_ERROR)) {
 					if ((flags & NV_TX_RETRYERROR)
 					    && !(flags & NV_TX_RETRYCOUNT_MASK))
 						nv_legacybackoff_reseed(dev);
@@ -2580,7 +2584,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
 			}
 		} else {
 			if (flags & NV_TX2_LASTPACKET) {
-				if (flags & NV_TX2_ERROR) {
+				if (unlikely(flags & NV_TX2_ERROR)) {
 					if ((flags & NV_TX2_RETRYERROR)
 					    && !(flags & NV_TX2_RETRYCOUNT_MASK))
 						nv_legacybackoff_reseed(dev);
@@ -2597,9 +2601,9 @@ static int nv_tx_done(struct net_device *dev, int limit)
 			}
 		}
 		if (unlikely(np->get_tx.orig++ == np->last_tx.orig))
-			np->get_tx.orig = np->first_tx.orig;
+			np->get_tx.orig = np->tx_ring.orig;
 		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
-			np->get_tx_ctx = np->first_tx_ctx;
+			np->get_tx_ctx = np->tx_skb;
 	}
 
 	netdev_completed_queue(np->dev, tx_work, bytes_compl);
@@ -2626,7 +2630,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
 		nv_unmap_txskb(np, np->get_tx_ctx);
 
 		if (flags & NV_TX2_LASTPACKET) {
-			if (flags & NV_TX2_ERROR) {
+			if (unlikely(flags & NV_TX2_ERROR)) {
 				if ((flags & NV_TX2_RETRYERROR)
 				    && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
 					if (np->driver_data & DEV_HAS_GEAR_MODE)
@@ -2651,9 +2655,9 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
 		}
 
 		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
-			np->get_tx.ex = np->first_tx.ex;
+			np->get_tx.ex = np->tx_ring.ex;
 		if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
-			np->get_tx_ctx = np->first_tx_ctx;
+			np->get_tx_ctx = np->tx_skb;
 	}
 
 	netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
@@ -2909,9 +2913,9 @@ static int nv_rx_process(struct net_device *dev, int limit)
 		u64_stats_update_end(&np->swstats_rx_syncp);
 next_pkt:
 		if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
-			np->get_rx.orig = np->first_rx.orig;
+			np->get_rx.orig = np->rx_ring.orig;
 		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
-			np->get_rx_ctx = np->first_rx_ctx;
+			np->get_rx_ctx = np->rx_skb;
 
 		rx_work++;
 	}
@@ -2998,9 +3002,9 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
 		}
 next_pkt:
 		if (unlikely(np->get_rx.ex++ == np->last_rx.ex))
-			np->get_rx.ex = np->first_rx.ex;
+			np->get_rx.ex = np->rx_ring.ex;
 		if (unlikely(np->get_rx_ctx++ == np->last_rx_ctx))
-			np->get_rx_ctx = np->first_rx_ctx;
+			np->get_rx_ctx = np->rx_skb;
 
 		rx_work++;
 	}
@@ -5507,11 +5511,9 @@ static int nv_open(struct net_device *dev)
 	/* One manual link speed update: Interrupts are enabled, future link
 	 * speed changes cause interrupts and are handled by nv_link_irq().
 	 */
-	{
-		u32 miistat;
-		miistat = readl(base + NvRegMIIStatus);
-		writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
-	}
+	readl(base + NvRegMIIStatus);
+	writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
+
 	/* set linkspeed to invalid value, thus force nv_update_linkspeed
 	 * to init hw */
 	np->linkspeed = 0;
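
The forcedeth hunks above drop the first_rx/first_tx (and first_rx_ctx/first_tx_ctx) pointers because they always point at the ring base (rx_ring/tx_ring) or the parallel nv_skb_map array (rx_skb/tx_skb), so the get/put cursors can simply wrap back to those. Below is a minimal, self-contained sketch of that ring-cursor wrap pattern; it is illustrative only, not driver code, and all names (demo_desc, demo_ring, demo_ring_init, demo_put, RING_SIZE) are hypothetical.

#include <stdio.h>

#define RING_SIZE 4

struct demo_desc {
	int payload;			/* stands in for a hardware descriptor */
};

struct demo_ring {
	struct demo_desc ring[RING_SIZE];	/* ring base; the old "first" pointer always equalled this */
	struct demo_desc *last;			/* &ring[RING_SIZE - 1] */
	struct demo_desc *put;			/* producer cursor */
	struct demo_desc *get;			/* consumer cursor */
};

static void demo_ring_init(struct demo_ring *r)
{
	r->last = &r->ring[RING_SIZE - 1];
	r->put = r->ring;		/* wrap target is simply the array base */
	r->get = r->ring;
}

/* Write one entry and advance the producer cursor, wrapping past the end. */
static void demo_put(struct demo_ring *r, int value)
{
	r->put->payload = value;
	if (r->put++ == r->last)	/* same post-increment wrap test the driver uses */
		r->put = r->ring;	/* back to the base, no separate "first" pointer needed */
}

int main(void)
{
	struct demo_ring r;
	int i;

	demo_ring_init(&r);
	for (i = 0; i < 6; i++) {
		demo_put(&r, i);
		printf("after put %d, producer cursor is at slot %td\n",
		       i, r.put - r.ring);
	}
	return 0;
}

Built with any hosted C compiler, the loop shows the cursor cycling through slots 1, 2, 3, 0, 1, 2; the driver applies the same test independently to the descriptor ring and to the nv_skb_map context array.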