From bf4d67d94c842edf57e3cac2c4dff58a9ce7ac41 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Tue, 20 Oct 2015 13:28:17 -0700 Subject: ixgbe: Reset interface after enabling SR-IOV Enabling SR-IOV and then bringing the interface up was resulting in the PF MAC addresses getting into a bad state. Specifically the MAC address was enabled for both VF 0 and the PF. This resulted in some odd behaviors such as VF 0 receiving a copy of the PFs traffic, which in turn enables the ability for VF 0 to spoof the PF. A workaround for this issue appears to be to bring up the interface first and then enable SR-IOV as this way the reset is then triggered in the existing code. In order to correct this I have added a change to ixgbe_setup_tc where if the interface is down we still will at least call ixgbe_reset so that the MAC addresses for the device are reset to the correct pools. Steps to reproduce issue: modprobe ixgbe echo 7 > /sys/bus/pci/devices/0000\:01\:00.1/sriov_numvfs ifconfig enp1s0f1 up ethregs -s 1:00.1 | grep MPSAR | grep -v 00000000 Result: MPSAR[0] 00000081 MPSAR[254] 00000001 Expected Result, behavior after patch: MPSAR[0] 00000080 MPSAR[254] 00000080 Signed-off-by: Alexander Duyck Tested-by: Darin Miller Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 47395ff5d908..aed8d029b23d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -7920,6 +7920,9 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) */ if (netif_running(dev)) ixgbe_close(dev); + else + ixgbe_reset(adapter); + ixgbe_clear_interrupt_scheme(adapter); #ifdef CONFIG_IXGBE_DCB -- cgit v1.2.3 From 8ddb33268902c80ecd9a0e1bc766a2dc4bc9fede Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Wed, 18 Nov 2015 15:47:06 -0800 Subject: i40e/i40evf: avoid mutex re-init If the driver were to happen to have a mutex held while the i40e_init_adminq call was called, the init_adminq might inadvertently call mutex_init on a lock that was held which is a violation of the calling semantics. Fix this by avoiding adminq.c code allocating/freeing this memory, and then do the same work only once in probe/remove. Testing Hints (Required if no HSD): for VF, load i40evf in bare metal and echo 32 > sriov_numvfs; echo 0 > sriov_numvfs in a loop. Yes this is a horrible thing to do. 
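For readers unfamiliar with the hazard this i40e/i40evf change addresses: re-running mutex_init() on a lock that another path may currently hold silently resets its owner and waiter state, so later unlocks and waiters misbehave. The sketch below is only illustrative of the lifecycle the patch moves to (the demo_* names are placeholders, not the real i40e symbols): the lock is initialized exactly once at probe time and destroyed exactly once at remove time, while the admin-queue init/shutdown paths merely use it.

#include <linux/mutex.h>

struct demo_hw {
	struct mutex aq_mutex;
};

static int demo_probe(struct demo_hw *hw)
{
	mutex_init(&hw->aq_mutex);	/* set up once, before anything can contend */
	return 0;
}

static void demo_init_adminq(struct demo_hw *hw)
{
	mutex_lock(&hw->aq_mutex);	/* take the lock; never re-init it here */
	/* ... program admin queue registers ... */
	mutex_unlock(&hw->aq_mutex);
}

static void demo_remove(struct demo_hw *hw)
{
	mutex_destroy(&hw->aq_mutex);	/* tear down once, after all users are gone */
}
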
Change-ID: Ida263c51b34e195252179e7e5e400d73a99be7a2 Reported-by: Stefan Assmann Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_adminq.c | 6 ------ drivers/net/ethernet/intel/i40e/i40e_main.c | 11 ++++++++++- drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 6 ------ drivers/net/ethernet/intel/i40evf/i40evf_main.c | 10 ++++++++++ 4 files changed, 20 insertions(+), 13 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 0ff8f01e57ee..1fd5ea82a9bc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -567,10 +567,6 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) goto init_adminq_exit; } - /* initialize locks */ - mutex_init(&hw->aq.asq_mutex); - mutex_init(&hw->aq.arq_mutex); - /* Set up register offsets */ i40e_adminq_init_regs(hw); @@ -664,8 +660,6 @@ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw) i40e_shutdown_asq(hw); i40e_shutdown_arq(hw); - /* destroy the locks */ - if (hw->nvm_buff.va) i40e_free_virt_mem(hw, &hw->nvm_buff); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index b825f978d441..4a9873ec28c7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -10295,6 +10295,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* set up a default setting for link flow control */ pf->hw.fc.requested_mode = I40E_FC_NONE; + /* set up the locks for the AQ, do this only once in probe + * and destroy them only once in remove + */ + mutex_init(&hw->aq.asq_mutex); + mutex_init(&hw->aq.arq_mutex); + err = i40e_init_adminq(hw); /* provide nvm, fw, api versions */ @@ -10697,7 +10703,6 @@ static void i40e_remove(struct pci_dev *pdev) set_bit(__I40E_DOWN, &pf->state); del_timer_sync(&pf->service_timer); cancel_work_sync(&pf->service_task); - i40e_fdir_teardown(pf); if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { i40e_free_vfs(pf); @@ -10740,6 +10745,10 @@ static void i40e_remove(struct pci_dev *pdev) "Failed to destroy the Admin Queue resources: %d\n", ret_code); + /* destroy the locks only once, here */ + mutex_destroy(&hw->aq.arq_mutex); + mutex_destroy(&hw->aq.asq_mutex); + /* Clear all dynamic memory lists of rings, q_vectors, and VSIs */ i40e_clear_interrupt_scheme(pf); for (i = 0; i < pf->num_alloc_vsi; i++) { diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index fd123ca60761..3f65e39b3fe4 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c @@ -551,10 +551,6 @@ i40e_status i40evf_init_adminq(struct i40e_hw *hw) goto init_adminq_exit; } - /* initialize locks */ - mutex_init(&hw->aq.asq_mutex); - mutex_init(&hw->aq.arq_mutex); - /* Set up register offsets */ i40e_adminq_init_regs(hw); @@ -596,8 +592,6 @@ i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw) i40e_shutdown_asq(hw); i40e_shutdown_arq(hw); - /* destroy the locks */ - if (hw->nvm_buff.va) i40e_free_virt_mem(hw, &hw->nvm_buff); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index d962164dfb0f..99d2cffae0cd 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -2476,6 +2476,12 @@ static int 
i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->bus.device = PCI_SLOT(pdev->devfn); hw->bus.func = PCI_FUNC(pdev->devfn); + /* set up the locks for the AQ, do this only once in probe + * and destroy them only once in remove + */ + mutex_init(&hw->aq.asq_mutex); + mutex_init(&hw->aq.arq_mutex); + INIT_LIST_HEAD(&adapter->mac_filter_list); INIT_LIST_HEAD(&adapter->vlan_filter_list); @@ -2629,6 +2635,10 @@ static void i40evf_remove(struct pci_dev *pdev) if (hw->aq.asq.count) i40evf_shutdown_adminq(hw); + /* destroy the locks only once, here */ + mutex_destroy(&hw->aq.arq_mutex); + mutex_destroy(&hw->aq.asq_mutex); + iounmap(hw->hw_addr); pci_release_regions(pdev); -- cgit v1.2.3 From e864b4c7b184bde36fa6a02bb3190983d2f796f9 Mon Sep 17 00:00:00 2001 From: Marcin Wojtas Date: Thu, 3 Dec 2015 15:20:49 +0100 Subject: net: mvpp2: fix missing DMA region unmap in egress processing The Tx descriptor release code currently calls dma_unmap_single() and dev_kfree_skb_any() if the descriptor is associated with a non-NULL skb. This condition is true only for the last fragment of the packet. Since every descriptor's buffer is DMA-mapped it has to be properly unmapped. Signed-off-by: Marcin Wojtas Fixes: 3f518509dedc ("ethernet: Add new driver for Marvell Armada 375 network unit") Cc: # v3.18+ Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvpp2.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index d9884fd15b45..95db519d9901 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -4401,11 +4401,10 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port, mvpp2_txq_inc_get(txq_pcpu); - if (!skb) - continue; - dma_unmap_single(port->dev->dev.parent, buf_phys_addr, skb_headlen(skb), DMA_TO_DEVICE); + if (!skb) + continue; dev_kfree_skb_any(skb); } } -- cgit v1.2.3 From 4229d502ad3091e54c6f2e44d21dd8190881b49c Mon Sep 17 00:00:00 2001 From: Marcin Wojtas Date: Thu, 3 Dec 2015 15:20:50 +0100 Subject: net: mvpp2: fix buffers' DMA handling on RX path Each allocated buffer, whose pointer is put into BM pool is DMA-mapped. Hence it should be properly unmapped after usage or when removing buffers from pool. This commit fixes DMA handling on RX path by adding dma_unmap_single() in mvpp2_rx() and in mvpp2_bufs_free(). The latter function's argument number had to be increased for this purpose. Signed-off-by: Marcin Wojtas Fixes: 3f518509dedc ("ethernet: Add new driver for Marvell Armada 375 network unit") Cc: # v3.18+ Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2.c | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 95db519d9901..eaef46169f0a 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -3413,16 +3413,23 @@ static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv, } /* Free all buffers from the pool */ -static void mvpp2_bm_bufs_free(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool) +static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv, + struct mvpp2_bm_pool *bm_pool) { int i; for (i = 0; i < bm_pool->buf_num; i++) { + dma_addr_t buf_phys_addr; u32 vaddr; /* Get buffer virtual address (indirect access) */ - mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); + buf_phys_addr = mvpp2_read(priv, + MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG); + + dma_unmap_single(dev, buf_phys_addr, + bm_pool->buf_size, DMA_FROM_DEVICE); + if (!vaddr) break; dev_kfree_skb_any((struct sk_buff *)vaddr); @@ -3439,7 +3446,7 @@ static int mvpp2_bm_pool_destroy(struct platform_device *pdev, { u32 val; - mvpp2_bm_bufs_free(priv, bm_pool); + mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool); if (bm_pool->buf_num) { WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id); return 0; @@ -3692,7 +3699,8 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, MVPP2_BM_LONG_BUF_NUM : MVPP2_BM_SHORT_BUF_NUM; else - mvpp2_bm_bufs_free(port->priv, new_pool); + mvpp2_bm_bufs_free(port->dev->dev.parent, + port->priv, new_pool); new_pool->pkt_size = pkt_size; @@ -3756,7 +3764,7 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu) int pkt_size = MVPP2_RX_PKT_SIZE(mtu); /* Update BM pool with new buffer size */ - mvpp2_bm_bufs_free(port->priv, port_pool); + mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool); if (port_pool->buf_num) { WARN(1, "cannot free all buffers in pool %d\n", port_pool->id); return -EIO; @@ -5136,6 +5144,9 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, skb = (struct sk_buff *)rx_desc->buf_cookie; + dma_unmap_single(dev->dev.parent, rx_desc->buf_phys_addr, + bm_pool->buf_size, DMA_FROM_DEVICE); + rcvd_pkts++; rcvd_bytes += rx_bytes; atomic_inc(&bm_pool->in_use); -- cgit v1.2.3 From b5015854674b653d982f104936ec688e253469b9 Mon Sep 17 00:00:00 2001 From: Marcin Wojtas Date: Thu, 3 Dec 2015 15:20:51 +0100 Subject: net: mvpp2: fix refilling BM pools in RX path In hitherto code in case of RX buffer allocation error during refill, original buffer is pushed to the network stack, but the amount of available buffer pointers in BM pool is decreased. This commit fixes the situation by moving refill call before skb_put(), and returning original buffer pointer to the pool in case of an error. Signed-off-by: Marcin Wojtas Fixes: 3f518509dedc ("ethernet: Add new driver for Marvell Armada 375 network unit") Cc: # v3.18+ Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvpp2.c | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index eaef46169f0a..a4beccf1fd46 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -5099,7 +5099,8 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, struct mvpp2_rx_queue *rxq) { struct net_device *dev = port->dev; - int rx_received, rx_filled, i; + int rx_received; + int rx_done = 0; u32 rcvd_pkts = 0; u32 rcvd_bytes = 0; @@ -5108,17 +5109,18 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, if (rx_todo > rx_received) rx_todo = rx_received; - rx_filled = 0; - for (i = 0; i < rx_todo; i++) { + while (rx_done < rx_todo) { struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); struct mvpp2_bm_pool *bm_pool; struct sk_buff *skb; + dma_addr_t phys_addr; u32 bm, rx_status; int pool, rx_bytes, err; - rx_filled++; + rx_done++; rx_status = rx_desc->status; rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE; + phys_addr = rx_desc->buf_phys_addr; bm = mvpp2_bm_cookie_build(rx_desc); pool = mvpp2_bm_cookie_pool_get(bm); @@ -5135,8 +5137,10 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, * comprised by the RX descriptor. */ if (rx_status & MVPP2_RXD_ERR_SUMMARY) { + err_drop_frame: dev->stats.rx_errors++; mvpp2_rx_error(port, rx_desc); + /* Return the buffer to the pool */ mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr, rx_desc->buf_cookie); continue; @@ -5144,7 +5148,13 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, skb = (struct sk_buff *)rx_desc->buf_cookie; - dma_unmap_single(dev->dev.parent, rx_desc->buf_phys_addr, + err = mvpp2_rx_refill(port, bm_pool, bm, 0); + if (err) { + netdev_err(port->dev, "failed to refill BM pools\n"); + goto err_drop_frame; + } + + dma_unmap_single(dev->dev.parent, phys_addr, bm_pool->buf_size, DMA_FROM_DEVICE); rcvd_pkts++; @@ -5157,12 +5167,6 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, mvpp2_rx_csum(port, rx_status, skb); napi_gro_receive(&port->napi, skb); - - err = mvpp2_rx_refill(port, bm_pool, bm, 0); - if (err) { - netdev_err(port->dev, "failed to refill BM pools\n"); - rx_filled--; - } } if (rcvd_pkts) { @@ -5176,7 +5180,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, /* Update Rx queue management counters */ wmb(); - mvpp2_rxq_status_update(port, rxq->id, rx_todo, rx_filled); + mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done); return rx_todo; } -- cgit v1.2.3 From fe53985aaac83d516b38358d4f39921d9942a0e2 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Thu, 3 Dec 2015 16:49:32 +0100 Subject: pppoe: fix memory corruption in padt work structure pppoe_connect() mustn't touch the padt_work field of pppoe sockets because that work could be already pending. 
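To make the failure mode concrete (a sketch under the assumption that the work item is already queued; the demo_* names are placeholders, not the pppoe symbols): re-initializing or memset()-ing over a queued work_struct wipes the list linkage the workqueue core is about to walk, which is what the NULL dereference in process_one_work() shown below reflects. The safe pattern, which the patch adopts, is to INIT_WORK() once when the object is created and never to touch the work_struct again while it can be pending.

#include <linux/workqueue.h>
#include <linux/string.h>

struct demo_sock {
	struct work_struct padt_work;
	int ifindex;			/* stands in for other per-connection state */
};

static void demo_unbind_work(struct work_struct *work)
{
	/* ... shut the connection down ... */
}

static void demo_create(struct demo_sock *ds)
{
	INIT_WORK(&ds->padt_work, demo_unbind_work);	/* once, at creation time */
}

static void demo_reconnect(struct demo_sock *ds)
{
	/*
	 * WRONG if padt_work may already be queued: both lines below would
	 * wipe the work_struct's list linkage that the workqueue core walks.
	 *
	 * memset(ds, 0, sizeof(*ds));
	 * INIT_WORK(&ds->padt_work, demo_unbind_work);
	 */
	ds->ifindex = 0;		/* reset the other fields individually instead */
}
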
[ 21.473147] BUG: unable to handle kernel NULL pointer dereference at 00000004 [ 21.474523] IP: [] process_one_work+0x29/0x31c [ 21.475164] *pde = 00000000 [ 21.475513] Oops: 0000 [#1] SMP [ 21.475910] Modules linked in: pppoe pppox ppp_generic slhc crc32c_intel aesni_intel virtio_net xts aes_i586 lrw gf128mul ablk_helper cryptd evdev acpi_cpufreq processor serio_raw button ext4 crc16 mbcache jbd2 virtio_blk virtio_pci virtio_ring virtio [ 21.476168] CPU: 2 PID: 164 Comm: kworker/2:2 Not tainted 4.4.0-rc1 #1 [ 21.476168] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Debian-1.8.2-1 04/01/2014 [ 21.476168] task: f5f83c00 ti: f5e28000 task.ti: f5e28000 [ 21.476168] EIP: 0060:[] EFLAGS: 00010046 CPU: 2 [ 21.476168] EIP is at process_one_work+0x29/0x31c [ 21.484082] EAX: 00000000 EBX: f678b2a0 ECX: 00000004 EDX: 00000000 [ 21.484082] ESI: f6c69940 EDI: f5e29ef0 EBP: f5e29f0c ESP: f5e29edc [ 21.484082] DS: 007b ES: 007b FS: 00d8 GS: 0000 SS: 0068 [ 21.484082] CR0: 80050033 CR2: 000000a4 CR3: 317ad000 CR4: 00040690 [ 21.484082] Stack: [ 21.484082] 00000000 f6c69950 00000000 f6c69940 c0042338 f5e29f0c c1327945 00000000 [ 21.484082] 00000008 f678b2a0 f6c69940 f678b2b8 f5e29f30 c1043984 f5f83c00 f6c69970 [ 21.484082] f678b2a0 c10437d3 f6775e80 f678b2a0 c10437d3 f5e29fac c1047059 f5e29f74 [ 21.484082] Call Trace: [ 21.484082] [] ? _raw_spin_lock_irq+0x28/0x30 [ 21.484082] [] worker_thread+0x1b1/0x244 [ 21.484082] [] ? rescuer_thread+0x229/0x229 [ 21.484082] [] ? rescuer_thread+0x229/0x229 [ 21.484082] [] kthread+0x8f/0x94 [ 21.484082] [] ? _raw_spin_unlock_irq+0x22/0x26 [ 21.484082] [] ret_from_kernel_thread+0x21/0x38 [ 21.484082] [] ? kthread_parkme+0x19/0x19 [ 21.496082] Code: 5d c3 55 89 e5 57 56 53 89 c3 83 ec 24 89 d0 89 55 e0 8d 7d e4 e8 6c d8 ff ff b9 04 00 00 00 89 45 d8 8b 43 24 89 45 dc 8b 45 d8 <8b> 40 04 8b 80 e0 00 00 00 c1 e8 05 24 01 88 45 d7 8b 45 e0 8d [ 21.496082] EIP: [] process_one_work+0x29/0x31c SS:ESP 0068:f5e29edc [ 21.496082] CR2: 0000000000000004 [ 21.496082] ---[ end trace e362cc9cf10dae89 ]--- Reported-by: Andrew Fixes: 287f3a943fef ("pppoe: Use workqueue to die properly when a PADT is received") Signed-off-by: Guillaume Nault Signed-off-by: David S. 
Miller --- drivers/net/ppp/pppoe.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 5e0b43283bce..0a37f840fcc5 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -568,6 +568,9 @@ static int pppoe_create(struct net *net, struct socket *sock, int kern) sk->sk_family = PF_PPPOX; sk->sk_protocol = PX_PROTO_OE; + INIT_WORK(&pppox_sk(sk)->proto.pppoe.padt_work, + pppoe_unbind_sock_work); + return 0; } @@ -632,8 +635,6 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, lock_sock(sk); - INIT_WORK(&po->proto.pppoe.padt_work, pppoe_unbind_sock_work); - error = -EINVAL; if (sp->sa_protocol != PX_PROTO_OE) goto end; @@ -663,8 +664,13 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, po->pppoe_dev = NULL; } - memset(sk_pppox(po) + 1, 0, - sizeof(struct pppox_sock) - sizeof(struct sock)); + po->pppoe_ifindex = 0; + memset(&po->pppoe_pa, 0, sizeof(po->pppoe_pa)); + memset(&po->pppoe_relay, 0, sizeof(po->pppoe_relay)); + memset(&po->chan, 0, sizeof(po->chan)); + po->next = NULL; + po->num = 0; + sk->sk_state = PPPOX_NONE; } -- cgit v1.2.3 From f2a3771ae8aca879c32336c76ad05a017629bae2 Mon Sep 17 00:00:00 2001 From: Pavel Machek Date: Fri, 4 Dec 2015 09:50:00 +0100 Subject: atl1c: Improve driver not to do order 4 GFP_ATOMIC allocation atl1c driver is doing order-4 allocation with GFP_ATOMIC priority. That often breaks networking after resume. Switch to GFP_KERNEL. Still not ideal, but should be significantly better. atl1c_setup_ring_resources() is called from .open() function, and already uses GFP_KERNEL, so this change is safe. Signed-off-by: Pavel Machek Acked-by: Michal Hocko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 2795d6db10e1..8b5988e210d5 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -1016,13 +1016,12 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter) sizeof(struct atl1c_recv_ret_status) * rx_desc_count + 8 * 4; - ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, - &ring_header->dma); + ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size, + &ring_header->dma, GFP_KERNEL); if (unlikely(!ring_header->desc)) { - dev_err(&pdev->dev, "pci_alloc_consistend failed\n"); + dev_err(&pdev->dev, "could not get memory for DMA buffer\n"); goto err_nomem; } - memset(ring_header->desc, 0, ring_header->size); /* init TPD ring */ tpd_ring[0].dma = roundup(ring_header->dma, 8); -- cgit v1.2.3 From 248be83dcb3feb3f6332eb3d010a016402138484 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Fri, 4 Dec 2015 01:45:40 +0300 Subject: sh_eth: fix kernel oops in skb_put() In a low memory situation the following kernel oops occurs: Unable to handle kernel NULL pointer dereference at virtual address 00000050 pgd = 8490c000 [00000050] *pgd=4651e831, *pte=00000000, *ppte=00000000 Internal error: Oops: 17 [#1] PREEMPT ARM Modules linked in: CPU: 0 Not tainted (3.4-at16 #9) PC is at skb_put+0x10/0x98 LR is at sh_eth_poll+0x2c8/0xa10 pc : [<8035f780>] lr : [<8028bf50>] psr: 60000113 sp : 84eb1a90 ip : 84eb1ac8 fp : 84eb1ac4 r10: 0000003f r9 : 000005ea r8 : 00000000 r7 : 00000000 r6 : 940453b0 r5 : 00030000 r4 : 9381b180 r3 : 00000000 r2 : 00000000 r1 : 000005ea r0 : 00000000 Flags: nZCv IRQs on FIQs on Mode SVC_32 ISA ARM Segment user Control: 10c53c7d Table: 4248c059 DAC: 00000015 Process klogd (pid: 2046, stack limit = 0x84eb02e8) [...] This is because netdev_alloc_skb() fails and 'mdp->rx_skbuff[entry]' is left NULL but sh_eth_rx() later uses it without checking. Add such check... Reported-by: Yasushi SHOJI Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index e7bab7909ed9..b1ebd7c7408c 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1462,6 +1462,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) if (mdp->cd->shift_rd0) desc_status >>= 16; + skb = mdp->rx_skbuff[entry]; if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 | RD_RFS5 | RD_RFS6 | RD_RFS10)) { ndev->stats.rx_errors++; @@ -1477,12 +1478,11 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) ndev->stats.rx_missed_errors++; if (desc_status & RD_RFS10) ndev->stats.rx_over_errors++; - } else { + } else if (skb) { if (!mdp->cd->hw_swap) sh_eth_soft_swap( phys_to_virt(ALIGN(rxdesc->addr, 4)), pkt_len + 2); - skb = mdp->rx_skbuff[entry]; mdp->rx_skbuff[entry] = NULL; if (mdp->cd->rpadir) skb_reserve(skb, NET_IP_ALIGN); -- cgit v1.2.3 From b17c1d9a52b8b931e2f1019fda5d34ece621c5fd Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Fri, 4 Dec 2015 01:51:10 +0300 Subject: ravb: fix RX queue #1 frame error counter name The Rx queue #1 frame error counter name contains trailing underscore, probably due to a typo... Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/ravb_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index ed5da4d47668..b69e0c249c4f 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1037,7 +1037,7 @@ static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = { "rx_queue_1_mcast_packets", "rx_queue_1_errors", "rx_queue_1_crc_errors", - "rx_queue_1_frame_errors_", + "rx_queue_1_frame_errors", "rx_queue_1_length_errors", "rx_queue_1_missed_errors", "rx_queue_1_over_errors", -- cgit v1.2.3 From ae79a639bb3dfd168dc8c1e5d6dfc471bdf6f284 Mon Sep 17 00:00:00 2001 From: Giuseppe CAVALLARO Date: Fri, 4 Dec 2015 07:21:06 +0100 Subject: stmmac: fix resource management when resume There is a memleak when suspend/resume this driver version. Currently the stmmac, during resume step, reallocates all the resources but they are not released when suspend. The patch is not to release these resources but the logic has been changed. In fact, it is not necessary to free and reallocate all from scratch because the memory data will be always preserved. As final solution, the patch just reinit the descriptors and the rx/tx pointers only when resume. Tested done on STi boxes. Reported-by: ZhengShunQian Signed-off-by: Giuseppe Cavallaro Cc: David S. Miller Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 3c6549aee11d..a5b869eb4678 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3046,8 +3046,6 @@ int stmmac_suspend(struct net_device *ndev) priv->hw->dma->stop_tx(priv->ioaddr); priv->hw->dma->stop_rx(priv->ioaddr); - stmmac_clear_descriptors(priv); - /* Enable Power down mode by programming the PMT regs */ if (device_may_wakeup(priv->device)) { priv->hw->mac->pmt(priv->hw, priv->wolopts); @@ -3105,7 +3103,12 @@ int stmmac_resume(struct net_device *ndev) netif_device_attach(ndev); - init_dma_desc_rings(ndev, GFP_ATOMIC); + priv->cur_rx = 0; + priv->dirty_rx = 0; + priv->dirty_tx = 0; + priv->cur_tx = 0; + stmmac_clear_descriptors(priv); + stmmac_hw_setup(ndev, false); stmmac_init_tx_coalesce(priv); stmmac_set_rx_mode(ndev); -- cgit v1.2.3 From f8c0cfa5eca902d388c0b57c7ca29a1ff2e6d8c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= Date: Sat, 5 Dec 2015 13:01:50 +0100 Subject: net: cdc_mbim: add "NDP to end" quirk for Huawei E3372 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The Huawei E3372 (12d1:157d) needs this quirk in MBIM mode as well. Allow this by forcing the NTB to contain only a single NDP, and add a device specific entry for this ID. Due to the way Huawei use device IDs, this might be applied to other modems as well. It is assumed that those modems will be based on the same firmware and will need this quirk too. If not, it will still not harm normal usage, although multiplexing performance could be impacted. Cc: Enrico Mioso Reported-by: Sami Farin Signed-off-by: Bjørn Mork Acked-By: Enrico Mioso Signed-off-by: David S. Miller --- drivers/net/usb/cdc_mbim.c | 26 +++++++++++++++++++++++++- drivers/net/usb/cdc_ncm.c | 10 +++++++++- 2 files changed, 34 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index bbde9884ab8a..8973abdec9f6 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -158,7 +158,7 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf) if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting)) goto err; - ret = cdc_ncm_bind_common(dev, intf, data_altsetting, 0); + ret = cdc_ncm_bind_common(dev, intf, data_altsetting, dev->driver_info->data); if (ret) goto err; @@ -582,6 +582,26 @@ static const struct driver_info cdc_mbim_info_zlp = { .tx_fixup = cdc_mbim_tx_fixup, }; +/* The spefication explicitly allows NDPs to be placed anywhere in the + * frame, but some devices fail unless the NDP is placed after the IP + * packets. Using the CDC_NCM_FLAG_NDP_TO_END flags to force this + * behaviour. + * + * Note: The current implementation of this feature restricts each NTB + * to a single NDP, implying that multiplexed sessions cannot share an + * NTB. This might affect performace for multiplexed sessions. 
+ */ +static const struct driver_info cdc_mbim_info_ndp_to_end = { + .description = "CDC MBIM", + .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN, + .bind = cdc_mbim_bind, + .unbind = cdc_mbim_unbind, + .manage_power = cdc_mbim_manage_power, + .rx_fixup = cdc_mbim_rx_fixup, + .tx_fixup = cdc_mbim_tx_fixup, + .data = CDC_NCM_FLAG_NDP_TO_END, +}; + static const struct usb_device_id mbim_devs[] = { /* This duplicate NCM entry is intentional. MBIM devices can * be disguised as NCM by default, and this is necessary to @@ -597,6 +617,10 @@ static const struct usb_device_id mbim_devs[] = { { USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&cdc_mbim_info, }, + /* Huawei E3372 fails unless NDP comes after the IP packets */ + { USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end, + }, /* default entry */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&cdc_mbim_info_zlp, diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 3b1ba8237768..1e9843a41168 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -955,10 +955,18 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_ * NTH16 header as we would normally do. NDP isn't written to the SKB yet, and * the wNdpIndex field in the header is actually not consistent with reality. It will be later. */ - if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) + if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) { if (ctx->delayed_ndp16->dwSignature == sign) return ctx->delayed_ndp16; + /* We can only push a single NDP to the end. Return + * NULL to send what we've already got and queue this + * skb for later. + */ + else if (ctx->delayed_ndp16->dwSignature) + return NULL; + } + /* follow the chain of NDPs, looking for a match */ while (ndpoffset) { ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset); -- cgit v1.2.3 From ed7d42e24effbd3681e909711a7a2119a85e9217 Mon Sep 17 00:00:00 2001 From: Stefan Wahren Date: Fri, 4 Dec 2015 16:29:10 +0100 Subject: net: qca_spi: fix transmit queue timeout handling In case of a tx queue timeout every transmit is blocked until the QCA7000 resets himself and triggers a sync which makes the driver flushs the tx ring. So avoid this blocking situation by triggering the sync immediately after the timeout. Waking the queue doesn't make sense in this situation. Signed-off-by: Stefan Wahren Fixes: 291ab06ecf67 ("net: qualcomm: new Ethernet over SPI driver for QCA7000") Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qualcomm/qca_spi.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index ddb2c6c6ec94..689a4a5c8dcf 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -736,9 +736,8 @@ qcaspi_netdev_tx_timeout(struct net_device *dev) netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n", jiffies, jiffies - dev->trans_start); qca->net_dev->stats.tx_errors++; - /* wake the queue if there is room */ - if (qcaspi_tx_ring_has_space(&qca->txr)) - netif_wake_queue(dev); + /* Trigger tx queue flush and QCA7000 reset */ + qca->sync = QCASPI_SYNC_UNKNOWN; } static int -- cgit v1.2.3 From 2ac46030331e2952a56c05bd15d99082fc410088 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 15 Nov 2015 15:11:00 +0200 Subject: virtio-net: Stop doing DMA from the stack Once virtio starts using the DMA API, we won't be able to safely DMA from the stack. virtio-net does a couple of config DMA requests from small stack buffers -- switch to using dynamically-allocated memory. This should have no effect on any performance-critical code paths. Reported-by: Andy Lutomirski Signed-off-by: Michael S. Tsirkin Tested-by: Andy Lutomirski --- drivers/net/virtio_net.c | 34 +++++++++++++++++++--------------- 1 file changed, 19 insertions(+), 15 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index d8838dedb7a4..f94ab786088f 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -140,6 +140,12 @@ struct virtnet_info { /* CPU hot plug notifier */ struct notifier_block nb; + + /* Control VQ buffers: protected by the rtnl lock */ + struct virtio_net_ctrl_hdr ctrl_hdr; + virtio_net_ctrl_ack ctrl_status; + u8 ctrl_promisc; + u8 ctrl_allmulti; }; struct padded_vnet_hdr { @@ -976,31 +982,30 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, struct scatterlist *out) { struct scatterlist *sgs[4], hdr, stat; - struct virtio_net_ctrl_hdr ctrl; - virtio_net_ctrl_ack status = ~0; unsigned out_num = 0, tmp; /* Caller should know better */ BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); - ctrl.class = class; - ctrl.cmd = cmd; + vi->ctrl_status = ~0; + vi->ctrl_hdr.class = class; + vi->ctrl_hdr.cmd = cmd; /* Add header */ - sg_init_one(&hdr, &ctrl, sizeof(ctrl)); + sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr)); sgs[out_num++] = &hdr; if (out) sgs[out_num++] = out; /* Add return status. */ - sg_init_one(&stat, &status, sizeof(status)); + sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status)); sgs[out_num] = &stat; BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); if (unlikely(!virtqueue_kick(vi->cvq))) - return status == VIRTIO_NET_OK; + return vi->ctrl_status == VIRTIO_NET_OK; /* Spin for a response, the kick causes an ioport write, trapping * into the hypervisor, so the request should be handled immediately. 
@@ -1009,7 +1014,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, !virtqueue_is_broken(vi->cvq)) cpu_relax(); - return status == VIRTIO_NET_OK; + return vi->ctrl_status == VIRTIO_NET_OK; } static int virtnet_set_mac_address(struct net_device *dev, void *p) @@ -1151,7 +1156,6 @@ static void virtnet_set_rx_mode(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg[2]; - u8 promisc, allmulti; struct virtio_net_ctrl_mac *mac_data; struct netdev_hw_addr *ha; int uc_count; @@ -1163,22 +1167,22 @@ static void virtnet_set_rx_mode(struct net_device *dev) if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) return; - promisc = ((dev->flags & IFF_PROMISC) != 0); - allmulti = ((dev->flags & IFF_ALLMULTI) != 0); + vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0); + vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0); - sg_init_one(sg, &promisc, sizeof(promisc)); + sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_PROMISC, sg)) dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", - promisc ? "en" : "dis"); + vi->ctrl_promisc ? "en" : "dis"); - sg_init_one(sg, &allmulti, sizeof(allmulti)); + sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti)); if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", - allmulti ? "en" : "dis"); + vi->ctrl_allmulti ? "en" : "dis"); uc_count = netdev_uc_count(dev); mc_count = netdev_mc_count(dev); -- cgit v1.2.3 From 4675390a9e7183bf45590e84a183e22e32c485a7 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 7 Dec 2015 10:09:06 +0100 Subject: ethernet: aurora: AURORA_NB8800 should depend on HAS_DMA If NO_DMA=y: ERROR: "dma_map_single" [drivers/net/ethernet/aurora/nb8800.ko] undefined! ERROR: "dma_unmap_page" [drivers/net/ethernet/aurora/nb8800.ko] undefined! ERROR: "dma_sync_single_for_cpu" [drivers/net/ethernet/aurora/nb8800.ko] undefined! ERROR: "dma_unmap_single" [drivers/net/ethernet/aurora/nb8800.ko] undefined! ERROR: "dma_alloc_coherent" [drivers/net/ethernet/aurora/nb8800.ko] undefined! ERROR: "dma_mapping_error" [drivers/net/ethernet/aurora/nb8800.ko] undefined! ERROR: "dma_map_page" [drivers/net/ethernet/aurora/nb8800.ko] undefined! ERROR: "dma_free_coherent" [drivers/net/ethernet/aurora/nb8800.ko] undefined! Signed-off-by: Geert Uytterhoeven Acked-by: Mans Rullgard Signed-off-by: David S. Miller --- drivers/net/ethernet/aurora/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/aurora/Kconfig b/drivers/net/ethernet/aurora/Kconfig index a3c7106fdf85..8ba7f8ff3434 100644 --- a/drivers/net/ethernet/aurora/Kconfig +++ b/drivers/net/ethernet/aurora/Kconfig @@ -13,6 +13,7 @@ if NET_VENDOR_AURORA config AURORA_NB8800 tristate "Aurora AU-NB8800 support" + depends on HAS_DMA select PHYLIB help Support for the AU-NB8800 gigabit Ethernet controller. -- cgit v1.2.3 From 76a9a3642a0b72d5687d680150580d55b6ea9804 Mon Sep 17 00:00:00 2001 From: Tomer Tayar Date: Mon, 7 Dec 2015 06:25:57 -0500 Subject: qed: fix handling of concurrent ramrods. Concurrent non-blocking slowpath ramrods can be completed out-of-order on the completion chain. Recycling completed elements, while previously sent elements are still completion pending, can lead to overriding of active elements on the chain. 
Furthermore, sending pending slowpath ramrods currently lacks the update of the chain element physical pointer. This patch: * Ensures that ramrods are sent to the FW with consecutive echo values. * Handles out-of-order completions by freeing only first successive completed entries. * Updates the chain element physical pointer when copying a pending element into a free element for sending. Signed-off-by: Tomer Tayar Signed-off-by: Manish Chopra Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_sp.h | 8 +++- drivers/net/ethernet/qlogic/qed/qed_spq.c | 63 +++++++++++++++++++++++-------- 2 files changed, 54 insertions(+), 17 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 31a1f1eb4f56..287fadfab52d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -124,8 +124,12 @@ struct qed_spq { dma_addr_t p_phys; struct qed_spq_entry *p_virt; - /* Used as index for completions (returns on EQ by FW) */ - u16 echo_idx; +#define SPQ_RING_SIZE \ + (CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element)) + + /* Bitmap for handling out-of-order completions */ + DECLARE_BITMAP(p_comp_bitmap, SPQ_RING_SIZE); + u8 comp_bitmap_idx; /* Statistics */ u32 unlimited_pending_count; diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 7c0b8459666e..3dd548ab8df1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -112,8 +112,6 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent) { - p_ent->elem.hdr.echo = 0; - p_hwfn->p_spq->echo_idx++; p_ent->flags = 0; switch (p_ent->comp_mode) { @@ -195,10 +193,12 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, struct qed_spq *p_spq, struct qed_spq_entry *p_ent) { - struct qed_chain *p_chain = &p_hwfn->p_spq->chain; + struct qed_chain *p_chain = &p_hwfn->p_spq->chain; + u16 echo = qed_chain_get_prod_idx(p_chain); struct slow_path_element *elem; struct core_db_data db; + p_ent->elem.hdr.echo = cpu_to_le16(echo); elem = qed_chain_produce(p_chain); if (!elem) { DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n"); @@ -437,7 +437,9 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn) p_spq->comp_count = 0; p_spq->comp_sent_count = 0; p_spq->unlimited_pending_count = 0; - p_spq->echo_idx = 0; + + bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE); + p_spq->comp_bitmap_idx = 0; /* SPQ cid, cannot fail */ qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid); @@ -582,26 +584,32 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn, struct qed_spq *p_spq = p_hwfn->p_spq; if (p_ent->queue == &p_spq->unlimited_pending) { - struct qed_spq_entry *p_en2; if (list_empty(&p_spq->free_pool)) { list_add_tail(&p_ent->list, &p_spq->unlimited_pending); p_spq->unlimited_pending_count++; return 0; - } + } else { + struct qed_spq_entry *p_en2; - p_en2 = list_first_entry(&p_spq->free_pool, - struct qed_spq_entry, - list); - list_del(&p_en2->list); + p_en2 = list_first_entry(&p_spq->free_pool, + struct qed_spq_entry, + list); + list_del(&p_en2->list); + + /* Copy the ring element physical pointer to the new + * entry, since we are about to override the entire ring + * entry and don't want to lose the pointer. 
+ */ + p_ent->elem.data_ptr = p_en2->elem.data_ptr; - /* Strcut assignment */ - *p_en2 = *p_ent; + *p_en2 = *p_ent; - kfree(p_ent); + kfree(p_ent); - p_ent = p_en2; + p_ent = p_en2; + } } /* entry is to be placed in 'pending' queue */ @@ -777,13 +785,38 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) { if (p_ent->elem.hdr.echo == echo) { + u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE; + list_del(&p_ent->list); - qed_chain_return_produced(&p_spq->chain); + /* Avoid overriding of SPQ entries when getting + * out-of-order completions, by marking the completions + * in a bitmap and increasing the chain consumer only + * for the first successive completed entries. + */ + bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE); + + while (test_bit(p_spq->comp_bitmap_idx, + p_spq->p_comp_bitmap)) { + bitmap_clear(p_spq->p_comp_bitmap, + p_spq->comp_bitmap_idx, + SPQ_RING_SIZE); + p_spq->comp_bitmap_idx++; + qed_chain_return_produced(&p_spq->chain); + } + p_spq->comp_count++; found = p_ent; break; } + + /* This is relatively uncommon - depends on scenarios + * which have mutliple per-PF sent ramrods. + */ + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, + "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n", + le16_to_cpu(echo), + le16_to_cpu(p_ent->elem.hdr.echo)); } /* Release lock before callback, as callback may post -- cgit v1.2.3 From c78df14ee0f6bc5e8741b4324b600b7277abb13e Mon Sep 17 00:00:00 2001 From: Ariel Elior Date: Mon, 7 Dec 2015 06:25:58 -0500 Subject: qed: Fix BAR size split for some servers Can't rely on pci config space to discover bar size, as in some environments this returns a wrong, too large value. Instead, rely on device register, which contains the value provided by MFW at preboot. Signed-off-by: Ariel Elior Signed-off-by: Manish Chopra Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 53 ++++++++++++++++---------- drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 4 ++ 2 files changed, 36 insertions(+), 21 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 803b190ccada..817bbd5476ff 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1385,52 +1385,63 @@ err0: return rc; } -static u32 qed_hw_bar_size(struct qed_dev *cdev, - u8 bar_id) +static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, + u8 bar_id) { - u32 size = pci_resource_len(cdev->pdev, (bar_id > 0) ? 2 : 0); + u32 bar_reg = (bar_id == 0 ? 
PGLUE_B_REG_PF_BAR0_SIZE + : PGLUE_B_REG_PF_BAR1_SIZE); + u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg); - return size / cdev->num_hwfns; + /* Get the BAR size(in KB) from hardware given val */ + return 1 << (val + 15); } int qed_hw_prepare(struct qed_dev *cdev, int personality) { - int rc, i; + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + int rc; /* Store the precompiled init data ptrs */ qed_init_iro_array(cdev); /* Initialize the first hwfn - will learn number of hwfns */ - rc = qed_hw_prepare_single(&cdev->hwfns[0], cdev->regview, + rc = qed_hw_prepare_single(p_hwfn, + cdev->regview, cdev->doorbells, personality); if (rc) return rc; - personality = cdev->hwfns[0].hw_info.personality; + personality = p_hwfn->hw_info.personality; /* Initialize the rest of the hwfns */ - for (i = 1; i < cdev->num_hwfns; i++) { + if (cdev->num_hwfns > 1) { void __iomem *p_regview, *p_doorbell; + u8 __iomem *addr; + + /* adjust bar offset for second engine */ + addr = cdev->regview + qed_hw_bar_size(p_hwfn, 0) / 2; + p_regview = addr; - p_regview = cdev->regview + - i * qed_hw_bar_size(cdev, 0); - p_doorbell = cdev->doorbells + - i * qed_hw_bar_size(cdev, 1); - rc = qed_hw_prepare_single(&cdev->hwfns[i], p_regview, + /* adjust doorbell bar offset for second engine */ + addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, 1) / 2; + p_doorbell = addr; + + /* prepare second hw function */ + rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview, p_doorbell, personality); + + /* in case of error, need to free the previously + * initiliazed hwfn 0. + */ if (rc) { - /* Cleanup previously initialized hwfns */ - while (--i >= 0) { - qed_init_free(&cdev->hwfns[i]); - qed_mcp_free(&cdev->hwfns[i]); - qed_hw_hwfn_free(&cdev->hwfns[i]); - } - return rc; + qed_init_free(p_hwfn); + qed_mcp_free(p_hwfn); + qed_hw_hwfn_free(p_hwfn); } } - return 0; + return rc; } void qed_hw_remove(struct qed_dev *cdev) diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 7a5ce5914ace..e8df12335a97 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -363,4 +363,8 @@ 0x7 << 0) #define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \ 0 +#define PGLUE_B_REG_PF_BAR0_SIZE \ + 0x2aae60UL +#define PGLUE_B_REG_PF_BAR1_SIZE \ + 0x2aae64UL #endif -- cgit v1.2.3 From 8f16bc97fa2a47e2e46d36f2f682e1215ee172f5 Mon Sep 17 00:00:00 2001 From: Sudarsana Kalluru Date: Mon, 7 Dec 2015 06:25:59 -0500 Subject: qed: Correct slowpath interrupt scheme When using INTa, ISR might be called before device is configured for INTa [E.g., due to other device asserting the shared interrupt line], in which case the ISR would read the SISR registers that shouldn't be read unless HW is already configured for INTa. This might break interrupts later on. There's also an MSI-X issue due to this difference, although it's mostly theoretical. This patch changes the initialization order, calling request_irq() for the slowpath interrupt only after the chip is configured for working in the preferred interrupt mode. Signed-off-by: Sudarsana Kalluru Signed-off-by: Manish Chopra Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 3 ++ drivers/net/ethernet/qlogic/qed/qed_int.c | 33 +++++++++++++----- drivers/net/ethernet/qlogic/qed/qed_int.h | 15 +++++--- drivers/net/ethernet/qlogic/qed/qed_main.c | 56 ++++++++++-------------------- 4 files changed, 55 insertions(+), 52 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index ac17d8669b1a..1292c360390c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -299,6 +299,7 @@ struct qed_hwfn { /* Flag indicating whether interrupts are enabled or not*/ bool b_int_enabled; + bool b_int_requested; struct qed_mcp_info *mcp_info; @@ -491,6 +492,8 @@ u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, u8 *input_buf, u32 max_size, u8 *unzip_buf); +int qed_slowpath_irq_req(struct qed_hwfn *hwfn); + #define QED_ETH_INTERFACE_VERSION 300 #endif /* _QED_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index de50e84902af..9cc9d62c1fec 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -783,22 +783,16 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); } -void qed_int_igu_enable(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - enum qed_int_mode int_mode) +int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + enum qed_int_mode int_mode) { - int i; - - p_hwfn->b_int_enabled = 1; + int rc, i; /* Mask non-link attentions */ for (i = 0; i < 9; i++) qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0); - /* Enable interrupt Generation */ - qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode); - /* Configure AEU signal change to produce attentions for link */ qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); @@ -808,6 +802,19 @@ void qed_int_igu_enable(struct qed_hwfn *p_hwfn, /* Unmask AEU signals toward IGU */ qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); + if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { + rc = qed_slowpath_irq_req(p_hwfn); + if (rc != 0) { + DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n"); + return -EINVAL; + } + p_hwfn->b_int_requested = true; + } + /* Enable interrupt Generation */ + qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode); + p_hwfn->b_int_enabled = 1; + + return rc; } void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, @@ -1127,3 +1134,11 @@ int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, return info->igu_sb_cnt; } + +void qed_int_disable_post_isr_release(struct qed_dev *cdev) +{ + int i; + + for_each_hwfn(cdev, i) + cdev->hwfns[i].b_int_requested = false; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 16b57518e706..51e0b09a7f47 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -169,10 +169,14 @@ int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, int *p_iov_blks); /** - * @file + * @brief qed_int_disable_post_isr_release - performs the cleanup post ISR + * release. The API need to be called after releasing all slowpath IRQs + * of the device. 
+ * + * @param cdev * - * @brief Interrupt handler */ +void qed_int_disable_post_isr_release(struct qed_dev *cdev); #define QED_CAU_DEF_RX_TIMER_RES 0 #define QED_CAU_DEF_TX_TIMER_RES 0 @@ -366,10 +370,11 @@ void qed_int_setup(struct qed_hwfn *p_hwfn, * @param p_hwfn * @param p_ptt * @param int_mode + * + * @return int */ -void qed_int_igu_enable(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - enum qed_int_mode int_mode); +int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + enum qed_int_mode int_mode); /** * @brief - Initialize CAU status block entry diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 947c7af72b25..174f7341c5c3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -476,41 +476,22 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance) return rc; } -static int qed_slowpath_irq_req(struct qed_dev *cdev) +int qed_slowpath_irq_req(struct qed_hwfn *hwfn) { - int i = 0, rc = 0; + struct qed_dev *cdev = hwfn->cdev; + int rc = 0; + u8 id; if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { - /* Request all the slowpath MSI-X vectors */ - for (i = 0; i < cdev->num_hwfns; i++) { - snprintf(cdev->hwfns[i].name, NAME_SIZE, - "sp-%d-%02x:%02x.%02x", - i, cdev->pdev->bus->number, - PCI_SLOT(cdev->pdev->devfn), - cdev->hwfns[i].abs_pf_id); - - rc = request_irq(cdev->int_params.msix_table[i].vector, - qed_msix_sp_int, 0, - cdev->hwfns[i].name, - cdev->hwfns[i].sp_dpc); - if (rc) - break; - - DP_VERBOSE(&cdev->hwfns[i], - (NETIF_MSG_INTR | QED_MSG_SP), + id = hwfn->my_id; + snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x", + id, cdev->pdev->bus->number, + PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); + rc = request_irq(cdev->int_params.msix_table[id].vector, + qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc); + if (!rc) + DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP), "Requested slowpath MSI-X\n"); - } - - if (i != cdev->num_hwfns) { - /* Free already request MSI-X vectors */ - for (i--; i >= 0; i--) { - unsigned int vec = - cdev->int_params.msix_table[i].vector; - synchronize_irq(vec); - free_irq(cdev->int_params.msix_table[i].vector, - cdev->hwfns[i].sp_dpc); - } - } } else { unsigned long flags = 0; @@ -534,13 +515,17 @@ static void qed_slowpath_irq_free(struct qed_dev *cdev) if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { for_each_hwfn(cdev, i) { + if (!cdev->hwfns[i].b_int_requested) + break; synchronize_irq(cdev->int_params.msix_table[i].vector); free_irq(cdev->int_params.msix_table[i].vector, cdev->hwfns[i].sp_dpc); } } else { - free_irq(cdev->pdev->irq, cdev); + if (QED_LEADING_HWFN(cdev)->b_int_requested) + free_irq(cdev->pdev->irq, cdev); } + qed_int_disable_post_isr_release(cdev); } static int qed_nic_stop(struct qed_dev *cdev) @@ -765,16 +750,11 @@ static int qed_slowpath_start(struct qed_dev *cdev, if (rc) goto err1; - /* Request the slowpath IRQ */ - rc = qed_slowpath_irq_req(cdev); - if (rc) - goto err2; - /* Allocate stream for unzipping */ rc = qed_alloc_stream_mem(cdev); if (rc) { DP_NOTICE(cdev, "Failed to allocate stream memory\n"); - goto err3; + goto err2; } /* Start the slowpath */ -- cgit v1.2.3 From e5d4b29fe86a911f447d2f1e95383e04c7cfb465 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Mon, 7 Dec 2015 13:04:30 +0100 Subject: vxlan: move IPv6 outpute route calculation to a function Will be used also for ndo_fill_metadata_dst. Signed-off-by: Jiri Benc Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 44 ++++++++++++++++++++++++++++++++++---------- 1 file changed, 34 insertions(+), 10 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 6369a5734d4c..5a38558da157 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1848,6 +1848,34 @@ static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *sk !(vxflags & VXLAN_F_UDP_CSUM)); } +#if IS_ENABLED(CONFIG_IPV6) +static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, + struct sk_buff *skb, int oif, + const struct in6_addr *daddr, + struct in6_addr *saddr) +{ + struct dst_entry *ndst; + struct flowi6 fl6; + int err; + + memset(&fl6, 0, sizeof(fl6)); + fl6.flowi6_oif = oif; + fl6.daddr = *daddr; + fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr; + fl6.flowi6_mark = skb->mark; + fl6.flowi6_proto = IPPROTO_UDP; + + err = ipv6_stub->ipv6_dst_lookup(vxlan->net, + vxlan->vn6_sock->sock->sk, + &ndst, &fl6); + if (err < 0) + return ERR_PTR(err); + + *saddr = fl6.saddr; + return ndst; +} +#endif + /* Bypass encapsulation if the destination is local */ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, struct vxlan_dev *dst_vxlan) @@ -2035,21 +2063,17 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, #if IS_ENABLED(CONFIG_IPV6) } else { struct dst_entry *ndst; - struct flowi6 fl6; + struct in6_addr saddr; u32 rt6i_flags; if (!vxlan->vn6_sock) goto drop; sk = vxlan->vn6_sock->sock->sk; - memset(&fl6, 0, sizeof(fl6)); - fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0; - fl6.daddr = dst->sin6.sin6_addr; - fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr; - fl6.flowi6_mark = skb->mark; - fl6.flowi6_proto = IPPROTO_UDP; - - if (ipv6_stub->ipv6_dst_lookup(vxlan->net, sk, &ndst, &fl6)) { + ndst = vxlan6_get_route(vxlan, skb, + rdst ? rdst->remote_ifindex : 0, + &dst->sin6.sin6_addr, &saddr); + if (IS_ERR(ndst)) { netdev_dbg(dev, "no route to %pI6\n", &dst->sin6.sin6_addr); dev->stats.tx_carrier_errors++; @@ -2081,7 +2105,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } ttl = ttl ? : ip6_dst_hoplimit(ndst); - err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr, + err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr, 0, ttl, src_port, dst_port, htonl(vni << 8), md, !net_eq(vxlan->net, dev_net(vxlan->dev)), flags); -- cgit v1.2.3 From 239e944ff532de6e9579b3913d7f76b4f01c7e2f Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Mon, 7 Dec 2015 13:04:31 +0100 Subject: vxlan: support ndo_fill_metadata_dst also for IPv6 Fill the metadata correctly even when tunneling over IPv6. Also, check that the provided metadata is of an address family that is supported by the tunnel. Signed-off-by: Jiri Benc Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 5a38558da157..14cfa4cdf903 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2419,9 +2419,30 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) vxlan->cfg.port_max, true); dport = info->key.tp_dst ? 
: vxlan->cfg.dst_port; - if (ip_tunnel_info_af(info) == AF_INET) + if (ip_tunnel_info_af(info) == AF_INET) { + if (!vxlan->vn4_sock) + return -EINVAL; return egress_ipv4_tun_info(dev, skb, info, sport, dport); - return -EINVAL; + } else { +#if IS_ENABLED(CONFIG_IPV6) + struct dst_entry *ndst; + + if (!vxlan->vn6_sock) + return -EINVAL; + ndst = vxlan6_get_route(vxlan, skb, 0, + &info->key.u.ipv6.dst, + &info->key.u.ipv6.src); + if (IS_ERR(ndst)) + return PTR_ERR(ndst); + dst_release(ndst); + + info->key.tp_src = sport; + info->key.tp_dst = dport; +#else /* !CONFIG_IPV6 */ + return -EPFNOSUPPORT; +#endif + } + return 0; } static const struct net_device_ops vxlan_netdev_ops = { -- cgit v1.2.3 From ce212d0f6f5523ca9eb8020267f1aa4eb6869ba8 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Mon, 7 Dec 2015 16:29:08 +0100 Subject: vxlan: interpret IP headers for ECN correctly When looking for outer IP header, use the actual socket address family, not the address family of the default destination which is not set for metadata based interfaces (and doesn't have to match the address family of the received packet even if it was set). Fix also the misleading comment. Signed-off-by: Jiri Benc Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 14cfa4cdf903..ba363cedef80 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1158,7 +1158,6 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, struct pcpu_sw_netstats *stats; union vxlan_addr saddr; int err = 0; - union vxlan_addr *remote_ip; /* For flow based devices, map all packets to VNI 0 */ if (vs->flags & VXLAN_F_COLLECT_METADATA) @@ -1169,7 +1168,6 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, if (!vxlan) goto drop; - remote_ip = &vxlan->default_dst.remote_ip; skb_reset_mac_header(skb); skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev))); skb->protocol = eth_type_trans(skb, vxlan->dev); @@ -1179,8 +1177,8 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) goto drop; - /* Re-examine inner Ethernet packet */ - if (remote_ip->sa.sa_family == AF_INET) { + /* Get data from the outer IP header */ + if (vxlan_get_sk_family(vs) == AF_INET) { oip = ip_hdr(skb); saddr.sin.sin_addr.s_addr = oip->saddr; saddr.sa.sa_family = AF_INET; -- cgit v1.2.3 From 73d4da7b9fec13cbddcf548888c4d0e46f1d5087 Mon Sep 17 00:00:00 2001 From: Wengang Wang Date: Thu, 8 Oct 2015 13:21:33 +0800 Subject: IB/mlx4: Use correct order of variables in log message There is a mis-order in mlx4 log. Fix it. Signed-off-by: Wengang Wang Acked-by: Or Gerlitz Signed-off-by: Doug Ledford --- drivers/net/ethernet/mellanox/mlx4/cmd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 2177e56ed0be..d48d5793407d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -1010,7 +1010,7 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave, if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED && smp->method == IB_MGMT_METHOD_GET) || network_view) { mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. 
Rejecting\n", - slave, smp->method, smp->mgmt_class, + slave, smp->mgmt_class, smp->method, network_view ? "Network" : "Host", be16_to_cpu(smp->attr_id)); return -EPERM; -- cgit v1.2.3 From a322a1bcf32900e9c9f4f9d3e09717513d918cdc Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Mon, 7 Dec 2015 19:17:30 -0800 Subject: geneve: Fix IPv6 xmit stats update. The call to iptunnel_xmit_stats() is not required after udp-tunnel6-xmit. Calling iptunnel_xmit_stats() there results in incorrect device stats. The following patch drops this call. Signed-off-by: Pravin B Shelar Signed-off-by: David S. Miller --- drivers/net/geneve.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index de5c30c9f059..c2b79f5d1c89 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -967,8 +967,6 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, err = udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev, &fl6.saddr, &fl6.daddr, prio, ttl, sport, geneve->dst_port, !udp_csum); - - iptunnel_xmit_stats(err, &dev->stats, dev->tstats); return NETDEV_TX_OK; tx_error: -- cgit v1.2.3 From f406ce4234149c302a7acb0be01c08de7b40bdb5 Mon Sep 17 00:00:00 2001 From: Pavel Fedin Date: Tue, 8 Dec 2015 10:37:44 +0300 Subject: net: thunderx: Correctly distinguish between VF and LMAC count Commit bc69fdfc6c13 ("net: thunderx: Enable BGX LMAC's RX/TX only after VF is up") introduces the lmac_cnt member and starts verifying the VF number against it. This is plain wrong, and works only because we currently have a hardcoded 1:1 mapping between VFs and LMACs, and in this case num_vf_en and lmac_cnt are always equal. However, in the future this may change, and the code will badly misbehave. The worst consequence of this is failure to deliver link status messages, causing VFs to go defunct because, since commit 0b72a9a1060e ("net: thunderx: Switchon carrier only upon interface link up"), a VF will not fully bring itself up without them. This patch fixes the potential problem by checking the VF number against num_vf_en. Since lmac_cnt is not used anywhere else, it is removed. Additionally, some duplicated code is factored out into nic_enable_vf(). Signed-off-by: Pavel Fedin Signed-off-by: David S.
Miller --- drivers/net/ethernet/cavium/thunder/nic_main.c | 39 ++++++++++++-------------- 1 file changed, 18 insertions(+), 21 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 4b7fd63ae57c..5f24d11cb16a 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -37,7 +37,6 @@ struct nicpf { #define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF) #define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF) u8 vf_lmac_map[MAX_LMAC]; - u8 lmac_cnt; struct delayed_work dwork; struct workqueue_struct *check_link; u8 link[MAX_LMAC]; @@ -280,7 +279,6 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic) u64 lmac_credit; nic->num_vf_en = 0; - nic->lmac_cnt = 0; for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) { if (!(bgx_map & (1 << bgx))) @@ -290,7 +288,6 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic) nic->vf_lmac_map[next_bgx_lmac++] = NIC_SET_VF_LMAC_MAP(bgx, lmac); nic->num_vf_en += lmac_cnt; - nic->lmac_cnt += lmac_cnt; /* Program LMAC credits */ lmac_credit = (1ull << 1); /* channel credit enable */ @@ -618,6 +615,21 @@ static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk) return 0; } +static void nic_enable_vf(struct nicpf *nic, int vf, bool enable) +{ + int bgx, lmac; + + nic->vf_enabled[vf] = enable; + + if (vf >= nic->num_vf_en) + return; + + bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); + + bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, enable); +} + /* Interrupt handler to handle mailbox messages from VFs */ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) { @@ -717,29 +729,14 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) break; case NIC_MBOX_MSG_CFG_DONE: /* Last message of VF config msg sequence */ - nic->vf_enabled[vf] = true; - if (vf >= nic->lmac_cnt) - goto unlock; - - bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); - lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); - - bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, true); + nic_enable_vf(nic, vf, true); goto unlock; case NIC_MBOX_MSG_SHUTDOWN: /* First msg in VF teardown sequence */ - nic->vf_enabled[vf] = false; if (vf >= nic->num_vf_en) nic->sqs_used[vf - nic->num_vf_en] = false; nic->pqs_vf[vf] = 0; - - if (vf >= nic->lmac_cnt) - break; - - bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); - lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); - - bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, false); + nic_enable_vf(nic, vf, false); break; case NIC_MBOX_MSG_ALLOC_SQS: nic_alloc_sqs(nic, &mbx.sqs_alloc); @@ -958,7 +955,7 @@ static void nic_poll_for_link(struct work_struct *work) mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; - for (vf = 0; vf < nic->lmac_cnt; vf++) { + for (vf = 0; vf < nic->num_vf_en; vf++) { /* Poll only if VF is UP */ if (!nic->vf_enabled[vf]) continue; -- cgit v1.2.3 From 90186af404ada5a47b875bf3c16d0b02bb023ea0 Mon Sep 17 00:00:00 2001 From: Peter Wu Date: Tue, 8 Dec 2015 12:17:42 +0100 Subject: r8152: fix lockup when runtime PM is enabled When an interface is brought up which was previously suspended (via runtime PM), it would hang. This happens because napi_disable is called before napi_enable. 
Solve this by avoiding napi_enable in the resume during open function (netif_running is true when open is called, IFF_UP is set after a successful open; netif_running is false when close is called, but IFF_UP is then still set). While at it, remove WORK_ENABLE check from rtl8152_open (introduced with the original change) because it cannot happen: - After this patch, runtime resume will not set it during rtl8152_open. - When link is up, rtl8152_open is not called. - When link is down during system/auto suspend/resume, it is not set. Fixes: 41cec84cf285 ("r8152: don't enable napi before rx ready") Link: https://lkml.kernel.org/r/20151205105912.GA1766@al Signed-off-by: Peter Wu Acked-by: Hayes Wang Signed-off-by: David S. Miller --- drivers/net/usb/r8152.c | 21 +++------------------ 1 file changed, 3 insertions(+), 18 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index d9427ca3dba7..2e32c41536ae 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -3067,17 +3067,6 @@ static int rtl8152_open(struct net_device *netdev) mutex_lock(&tp->control); - /* The WORK_ENABLE may be set when autoresume occurs */ - if (test_bit(WORK_ENABLE, &tp->flags)) { - clear_bit(WORK_ENABLE, &tp->flags); - usb_kill_urb(tp->intr_urb); - cancel_delayed_work_sync(&tp->schedule); - - /* disable the tx/rx, if the workqueue has enabled them. */ - if (netif_carrier_ok(netdev)) - tp->rtl_ops.disable(tp); - } - tp->rtl_ops.up(tp); rtl8152_set_speed(tp, AUTONEG_ENABLE, @@ -3124,12 +3113,6 @@ static int rtl8152_close(struct net_device *netdev) } else { mutex_lock(&tp->control); - /* The autosuspend may have been enabled and wouldn't - * be disable when autoresume occurs, because the - * netif_running() would be false. - */ - rtl_runtime_suspend_enable(tp, false); - tp->rtl_ops.down(tp); mutex_unlock(&tp->control); @@ -3512,7 +3495,7 @@ static int rtl8152_resume(struct usb_interface *intf) netif_device_attach(tp->netdev); } - if (netif_running(tp->netdev)) { + if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) { if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { rtl_runtime_suspend_enable(tp, false); clear_bit(SELECTIVE_SUSPEND, &tp->flags); @@ -3532,6 +3515,8 @@ static int rtl8152_resume(struct usb_interface *intf) } usb_submit_urb(tp->intr_urb, GFP_KERNEL); } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { + if (tp->netdev->flags & IFF_UP) + rtl_runtime_suspend_enable(tp, false); clear_bit(SELECTIVE_SUSPEND, &tp->flags); } -- cgit v1.2.3 From 8cde3e4425df26331dac4d0f1f7114c031728a3c Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 8 Dec 2015 16:17:29 +0100 Subject: net: fsl: avoid 64-bit warning on pq_mdio The pq_mdio driver can now be built for ARM64, where we get a format string warning: drivers/net/ethernet/freescale/fsl_pq_mdio.c: In function 'fsl_pq_mdio_probe': drivers/net/ethernet/freescale/fsl_pq_mdio.c:467:25: warning: format '%x' expects argument of type 'unsigned int', but argument 3 has type 'long int' [-Wformat=] The argument is an implicit ptrdiff_t from the subtraction of two pointers, so we should use the %z format string modifier to make this work on 64-bit architectures. Signed-off-by: Arnd Bergmann Fixes: fe761bcb9046 ("net: fsl: expands dependencies of NET_VENDOR_FREESCALE") Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/fsl_pq_mdio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 55c36230e176..40071dad1c57 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -464,7 +464,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) * address). Print error message but continue anyway. */ if ((void *)tbipa > priv->map + resource_size(&res) - 4) - dev_err(&pdev->dev, "invalid register map (should be at least 0x%04x to contain TBI address)\n", + dev_err(&pdev->dev, "invalid register map (should be at least 0x%04zx to contain TBI address)\n", ((void *)tbipa - priv->map) + 4); iowrite32be(be32_to_cpup(prop), tbipa); -- cgit v1.2.3 From b0a8d1a0b6e569b7dd14322ca2df4d576f325908 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 8 Dec 2015 16:28:59 +0100 Subject: net: ezchip: fix address space confusion in nps_enet.c The nps_enet driver happily mixes virtual, physical and __iomem addresses, which are all different depending on the architecture and configuration. That causes a warning when building the code on ARM with LPAE mode enabled: drivers/net/ethernet/ezchip/nps_enet.c: In function 'nps_enet_send_frame': drivers/net/ethernet/ezchip/nps_enet.c:370:13: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast] but will also fail to work for other reasons. In this patch, I'm trying to change the code to use only normal kernel pointers, which I assume is what the author actually meant: * For reading or writing a 32-bit word that may be unaligned when an SKB contains unaligned data, I'm using get_unaligned/put_unaligned() rather than memcpy_fromio/toio. * For converting a u8 pointer to a u32 pointer, I use a cast rather than the incorrect virt_to_phys. * For copying a couple of bytes from one place to another while respecting alignment, I use memcpy instead of memcpy_toio. Signed-off-by: Arnd Bergmann Signed-off-by: David S. Miller --- drivers/net/ethernet/ezchip/nps_enet.c | 30 +++++++++--------------------- 1 file changed, 9 insertions(+), 21 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index 63c2bcf8031a..b1026689b78f 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -48,21 +48,15 @@ static void nps_enet_read_rx_fifo(struct net_device *ndev, *reg = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); else { /* !dst_is_aligned */ for (i = 0; i < len; i++, reg++) { - u32 buf = - nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); - - /* to accommodate word-unaligned address of "reg" - * we have to do memcpy_toio() instead of simple "=". 
- */ - memcpy_toio((void __iomem *)reg, &buf, sizeof(buf)); + u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); + put_unaligned(buf, reg); } } /* copy last bytes (if any) */ if (last) { u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); - - memcpy_toio((void __iomem *)reg, &buf, last); + memcpy((u8*)reg, &buf, last); } } @@ -367,7 +361,7 @@ static void nps_enet_send_frame(struct net_device *ndev, struct nps_enet_tx_ctl tx_ctrl; short length = skb->len; u32 i, len = DIV_ROUND_UP(length, sizeof(u32)); - u32 *src = (u32 *)virt_to_phys(skb->data); + u32 *src = (void *)skb->data; bool src_is_aligned = IS_ALIGNED((unsigned long)src, sizeof(u32)); tx_ctrl.value = 0; @@ -375,17 +369,11 @@ static void nps_enet_send_frame(struct net_device *ndev, if (src_is_aligned) for (i = 0; i < len; i++, src++) nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, *src); - else { /* !src_is_aligned */ - for (i = 0; i < len; i++, src++) { - u32 buf; - - /* to accommodate word-unaligned address of "src" - * we have to do memcpy_fromio() instead of simple "=" - */ - memcpy_fromio(&buf, (void __iomem *)src, sizeof(buf)); - nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, buf); - } - } + else /* !src_is_aligned */ + for (i = 0; i < len; i++, src++) + nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, + get_unaligned(src)); + /* Write the length of the Frame */ tx_ctrl.nt = length; -- cgit v1.2.3 From 651df2183543bc92f5dbcf99cd9e236ead0bc4c5 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Wed, 9 Dec 2015 19:56:31 +0100 Subject: phy: micrel: Fix finding PHY properties in MAC node. commit 8b63ec1837fa ("phylib: Make PHYs children of their MDIO bus, not the bus' parent.") changed the parenting of PHY devices, making them a child of the MDIO bus, instead of the MAC device. This broken the Micrel PHY driver which has a deprecated feature of allowing PHY properties to be placed into the MAC node. In order to find the MAC node, we need to walk up the tree of devices until we find one with an OF node attached. Reported-by: Dinh Nguyen Suggested-by: David Daney Acked-by: David Daney Fixes: 8b63ec1837fa ("phylib: Make PHYs children of their MDIO bus, not the bus' parent.") Signed-off-by: Andrew Lunn Tested-by: Dinh Nguyen Acked-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/micrel.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index cf6312fafea5..e13ad6cdcc22 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -339,9 +339,18 @@ static int ksz9021_config_init(struct phy_device *phydev) { const struct device *dev = &phydev->dev; const struct device_node *of_node = dev->of_node; + const struct device *dev_walker; - if (!of_node && dev->parent->of_node) - of_node = dev->parent->of_node; + /* The Micrel driver has a deprecated option to place phy OF + * properties in the MAC node. Walk up the tree of devices to + * find a device with an OF node. + */ + dev_walker = &phydev->dev; + do { + of_node = dev_walker->of_node; + dev_walker = dev_walker->parent; + + } while (!of_node && dev_walker); if (of_node) { ksz9021_load_values_from_of(phydev, of_node, -- cgit v1.2.3 From de68f5de56512a2ff5d5810ef4d54c53470c3c45 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Wed, 9 Dec 2015 19:35:41 -0500 Subject: bnxt_en: Fix bitmap declaration to work on 32-bit arches. The declaration of the bitmap vf_req_snif_bmap using fixed array of unsigned long will only work on 64-bit archs. 
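(A minimal sketch of the size arithmetic, using the kernel's standard bitmap helpers rather than code taken from this patch: an array of four longs holds 256 bits only when a long is 64 bits wide.)

	/* Sketch, not part of the patch: why "unsigned long map[4]" is not portable.
	 *
	 *   64-bit arch: 4 * BITS_PER_LONG = 4 * 64 = 256 bits (enough)
	 *   32-bit arch: 4 * BITS_PER_LONG = 4 * 32 = 128 bits (too small, and the
	 *   later memset() of 32 bytes writes past the 16-byte array)
	 *
	 * DECLARE_BITMAP(map, 256) expands to
	 *   unsigned long map[BITS_TO_LONGS(256)]
	 * i.e. 4 longs on 64-bit and 8 longs on 32-bit, so the map always holds
	 * 256 bits regardless of architecture.
	 */
	DECLARE_BITMAP(vf_req_snif_bmap, 256);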
Use DECLARE_BITMAP instead which will work on all archs. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index bdf094fb6ef9..51671e3c0e58 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2693,17 +2693,16 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) req.ver_upd = DRV_VER_UPD; if (BNXT_PF(bp)) { - unsigned long vf_req_snif_bmap[4]; + DECLARE_BITMAP(vf_req_snif_bmap, 256); u32 *data = (u32 *)vf_req_snif_bmap; - memset(vf_req_snif_bmap, 0, 32); + memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap)); for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap); - for (i = 0; i < 8; i++) { - req.vf_req_fwd[i] = cpu_to_le32(*data); - data++; - } + for (i = 0; i < 8; i++) + req.vf_req_fwd[i] = cpu_to_le32(data[i]); + req.enables |= cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); } -- cgit v1.2.3 From caefe526d7b5af11d9b5977b2862eb144fa45537 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Wed, 9 Dec 2015 19:35:42 -0500 Subject: bnxt_en: Change bp->state to bitmap. This allows multiple independent bits to be set for various states. Subsequent patches to implement tx timeout reset will require this. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 8 ++++---- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 5 ++--- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 2 +- 3 files changed, 7 insertions(+), 8 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 51671e3c0e58..fd89e9d70ab6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4602,7 +4602,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) bp->nge_port_cnt = 1; } - bp->state = BNXT_STATE_OPEN; + set_bit(BNXT_STATE_OPEN, &bp->state); bnxt_enable_int(bp); /* Enable TX queues */ bnxt_tx_enable(bp); @@ -4678,7 +4678,7 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) /* Change device state to avoid TX queue wake up's */ bnxt_tx_disable(bp); - bp->state = BNXT_STATE_CLOSED; + clear_bit(BNXT_STATE_OPEN, &bp->state); cancel_work_sync(&bp->sp_task); /* Flush rings before disabling interrupts */ @@ -5080,7 +5080,7 @@ static void bnxt_sp_task(struct work_struct *work) struct bnxt *bp = container_of(work, struct bnxt, sp_task); int rc; - if (bp->state != BNXT_STATE_OPEN) + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) return; if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) @@ -5185,7 +5185,7 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) bp->timer.function = bnxt_timer; bp->current_interval = BNXT_TIMER_INTERVAL; - bp->state = BNXT_STATE_CLOSED; + clear_bit(BNXT_STATE_OPEN, &bp->state); return 0; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 674bc5159b91..a8b688151e0c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -925,9 +925,8 @@ struct bnxt { struct timer_list timer; - int state; -#define BNXT_STATE_CLOSED 0 -#define BNXT_STATE_OPEN 1 + unsigned long state; +#define BNXT_STATE_OPEN 0 
struct bnxt_irq *irq_tbl; u8 mac_addr[ETH_ALEN]; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 7a9af2887d8e..ea044bbcd384 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -21,7 +21,7 @@ #ifdef CONFIG_BNXT_SRIOV static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) { - if (bp->state != BNXT_STATE_OPEN) { + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { netdev_err(bp->dev, "vf ndo called though PF is down\n"); return -EINVAL; } -- cgit v1.2.3 From 4cebdcec0933bf39c0ab42e8ce8c9d72f803fbe9 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Wed, 9 Dec 2015 19:35:43 -0500 Subject: bnxt_en: Don't cancel sp_task from bnxt_close_nic(). When implementing driver reset from tx_timeout in the next patch, bnxt_close_nic() will be called from the sp_task workqueue. Calling cancel_work() on sp_task will hang the workqueue. Instead, set a new bit BNXT_STATE_IN_SP_TASK when bnxt_sp_task() is running. bnxt_close_nic() will wait for BNXT_STATE_IN_SP_TASK to clear before proceeding. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 13 +++++++++++-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 + 2 files changed, 12 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index fd89e9d70ab6..f5f448959ee2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4679,7 +4679,9 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) bnxt_tx_disable(bp); clear_bit(BNXT_STATE_OPEN, &bp->state); - cancel_work_sync(&bp->sp_task); + smp_mb__after_atomic(); + while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state)) + msleep(20); /* Flush rings before disabling interrupts */ bnxt_shutdown_nic(bp, irq_re_init); @@ -5080,8 +5082,12 @@ static void bnxt_sp_task(struct work_struct *work) struct bnxt *bp = container_of(work, struct bnxt, sp_task); int rc; - if (!test_bit(BNXT_STATE_OPEN, &bp->state)) + set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); + smp_mb__after_atomic(); + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { + clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); return; + } if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event)) bnxt_cfg_rx_mode(bp); @@ -5107,6 +5113,9 @@ static void bnxt_sp_task(struct work_struct *work) } if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) bnxt_reset_task(bp); + + smp_mb__before_atomic(); + clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); } static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index a8b688151e0c..f199f4cc8ffe 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -927,6 +927,7 @@ struct bnxt { unsigned long state; #define BNXT_STATE_OPEN 0 +#define BNXT_STATE_IN_SP_TASK 1 struct bnxt_irq *irq_tbl; u8 mac_addr[ETH_ALEN]; -- cgit v1.2.3 From 028de140ffdf481d4948de663b33dae78e1e9cc8 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Wed, 9 Dec 2015 19:35:44 -0500 Subject: bnxt_en: Implement missing tx timeout reset logic. The reset logic calls bnxt_close_nic() and bnxt_open_nic() under rtnl_lock from bnxt_sp_task. BNXT_STATE_IN_SP_TASK must be cleared before calling bnxt_close_nic() to avoid deadlock. v2: Fixed white space error. 
Thanks Dave. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index f5f448959ee2..07f5f239cb65 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -5031,8 +5031,10 @@ static void bnxt_dbg_dump_states(struct bnxt *bp) static void bnxt_reset_task(struct bnxt *bp) { bnxt_dbg_dump_states(bp); - if (netif_running(bp->dev)) - bnxt_tx_disable(bp); /* prevent tx timout again */ + if (netif_running(bp->dev)) { + bnxt_close_nic(bp, false, false); + bnxt_open_nic(bp, false, false); + } } static void bnxt_tx_timeout(struct net_device *dev) @@ -5111,8 +5113,16 @@ static void bnxt_sp_task(struct work_struct *work) bnxt_hwrm_tunnel_dst_port_free( bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN); } - if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) + if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) { + /* bnxt_reset_task() calls bnxt_close_nic() which waits + * for BNXT_STATE_IN_SP_TASK to clear. + */ + clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); + rtnl_lock(); bnxt_reset_task(bp); + set_bit(BNXT_STATE_IN_SP_TASK, &bp->state); + rtnl_unlock(); + } smp_mb__before_atomic(); clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); -- cgit v1.2.3 From f1c2ef40c6436f8fa287ff1be2c75c4932180b1f Mon Sep 17 00:00:00 2001 From: Bert Kenward Date: Fri, 11 Dec 2015 09:39:32 +0000 Subject: sfc: only use RSS filters if we're using RSS Without this, filter insertion on a VF would fail if only one channel was in use. This would include the unicast station filter and therefore no traffic would be received. Signed-off-by: Bert Kenward Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ef10.c | 24 ++++++++++++------------ drivers/net/ethernet/sfc/efx.h | 5 +++++ drivers/net/ethernet/sfc/farch.c | 2 +- 3 files changed, 18 insertions(+), 13 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index bc6d21b471be..e6a084a6be12 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -3299,7 +3299,8 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx, new_spec.priority = EFX_FILTER_PRI_AUTO; new_spec.flags = (EFX_FILTER_FLAG_RX | - EFX_FILTER_FLAG_RX_RSS); + (efx_rss_enabled(efx) ? + EFX_FILTER_FLAG_RX_RSS : 0)); new_spec.dmaq_id = 0; new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT; rc = efx_ef10_filter_push(efx, &new_spec, @@ -3921,6 +3922,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, { struct efx_ef10_filter_table *table = efx->filter_state; struct efx_ef10_dev_addr *addr_list; + enum efx_filter_flags filter_flags; struct efx_filter_spec spec; u8 baddr[ETH_ALEN]; unsigned int i, j; @@ -3935,11 +3937,11 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, addr_count = table->dev_uc_count; } + filter_flags = efx_rss_enabled(efx) ? 
EFX_FILTER_FLAG_RX_RSS : 0; + /* Insert/renew filters */ for (i = 0; i < addr_count; i++) { - efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, - EFX_FILTER_FLAG_RX_RSS, - 0); + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, addr_list[i].addr); rc = efx_ef10_filter_insert(efx, &spec, true); @@ -3968,9 +3970,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx, if (multicast && rollback) { /* Also need an Ethernet broadcast filter */ - efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, - EFX_FILTER_FLAG_RX_RSS, - 0); + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); eth_broadcast_addr(baddr); efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr); rc = efx_ef10_filter_insert(efx, &spec, true); @@ -4000,13 +4000,14 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast, { struct efx_ef10_filter_table *table = efx->filter_state; struct efx_ef10_nic_data *nic_data = efx->nic_data; + enum efx_filter_flags filter_flags; struct efx_filter_spec spec; u8 baddr[ETH_ALEN]; int rc; - efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, - EFX_FILTER_FLAG_RX_RSS, - 0); + filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0; + + efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); if (multicast) efx_filter_set_mc_def(&spec); @@ -4023,8 +4024,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast, if (!nic_data->workaround_26807) { /* Also need an Ethernet broadcast filter */ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, - EFX_FILTER_FLAG_RX_RSS, - 0); + filter_flags, 0); eth_broadcast_addr(baddr); efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr); diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 1aaf76c1ace8..10827476bc0b 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -76,6 +76,11 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); #define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \ EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE) +static inline bool efx_rss_enabled(struct efx_nic *efx) +{ + return efx->rss_spread > 1; +} + /* Filters */ void efx_mac_reconfigure(struct efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 5a1c5a8f278a..133e9e35be9e 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -2242,7 +2242,7 @@ efx_farch_filter_init_rx_auto(struct efx_nic *efx, */ spec->priority = EFX_FILTER_PRI_AUTO; spec->flags = (EFX_FILTER_FLAG_RX | - (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) | + (efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0) | (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0)); spec->dmaq_id = 0; } -- cgit v1.2.3 From 946973a348a16f724374e0f818b31c095686471f Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 10 Dec 2015 17:23:09 +0200 Subject: net:hns: annotate IO address space properly Mark address pointer with __iomem in the IO accessors. Otherwise we will get a sparse complain like following .../hns/hns_dsaf_reg.h:991:36: warning: incorrect type in argument 1 (different address spaces) .../hns/hns_dsaf_reg.h:991:36: expected unsigned char [noderef] [usertype] *base .../hns/hns_dsaf_reg.h:991:36: got void *base Signed-off-by: Andy Shevchenko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h index b475e1bf2e6f..bdbd80423b17 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h @@ -898,7 +898,7 @@ #define XGMAC_PAUSE_CTL_RSP_MODE_B 2 #define XGMAC_PAUSE_CTL_TX_XOFF_B 3 -static inline void dsaf_write_reg(void *base, u32 reg, u32 value) +static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value) { u8 __iomem *reg_addr = ACCESS_ONCE(base); @@ -908,7 +908,7 @@ static inline void dsaf_write_reg(void *base, u32 reg, u32 value) #define dsaf_write_dev(a, reg, value) \ dsaf_write_reg((a)->io_base, (reg), (value)) -static inline u32 dsaf_read_reg(u8 *base, u32 reg) +static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg) { u8 __iomem *reg_addr = ACCESS_ONCE(base); @@ -927,8 +927,8 @@ static inline u32 dsaf_read_reg(u8 *base, u32 reg) #define dsaf_set_bit(origin, shift, val) \ dsaf_set_field((origin), (1ull << (shift)), (shift), (val)) -static inline void dsaf_set_reg_field(void *base, u32 reg, u32 mask, u32 shift, - u32 val) +static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask, + u32 shift, u32 val) { u32 origin = dsaf_read_reg(base, reg); @@ -947,7 +947,8 @@ static inline void dsaf_set_reg_field(void *base, u32 reg, u32 mask, u32 shift, #define dsaf_get_bit(origin, shift) \ dsaf_get_field((origin), (1ull << (shift)), (shift)) -static inline u32 dsaf_get_reg_field(void *base, u32 reg, u32 mask, u32 shift) +static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask, + u32 shift) { u32 origin; -- cgit v1.2.3 From 98900a80d5343901634852190e2728ea1ffec250 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 10 Dec 2015 17:23:10 +0200 Subject: net:hns: print MAC with %pM printf() has a dedicated specifier to print MAC addresses. Use it instead of pushing each byte via stack. Signed-off-by: Andy Shevchenko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 49 +++++++--------------- 1 file changed, 14 insertions(+), 35 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 2a98eba660c0..b674414a4d72 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -1259,12 +1259,8 @@ int hns_dsaf_set_mac_uc_entry( if (MAC_IS_ALL_ZEROS(mac_entry->addr) || MAC_IS_BROADCAST(mac_entry->addr) || MAC_IS_MULTICAST(mac_entry->addr)) { - dev_err(dsaf_dev->dev, - "set_uc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n", - dsaf_dev->ae_dev.name, mac_entry->addr[0], - mac_entry->addr[1], mac_entry->addr[2], - mac_entry->addr[3], mac_entry->addr[4], - mac_entry->addr[5]); + dev_err(dsaf_dev->dev, "set_uc %s Mac %pM err!\n", + dsaf_dev->ae_dev.name, mac_entry->addr); return -EINVAL; } @@ -1331,12 +1327,8 @@ int hns_dsaf_set_mac_mc_entry( /* mac addr check */ if (MAC_IS_ALL_ZEROS(mac_entry->addr)) { - dev_err(dsaf_dev->dev, - "set uc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n", - dsaf_dev->ae_dev.name, mac_entry->addr[0], - mac_entry->addr[1], mac_entry->addr[2], - mac_entry->addr[3], - mac_entry->addr[4], mac_entry->addr[5]); + dev_err(dsaf_dev->dev, "set uc %s Mac %pM err!\n", + dsaf_dev->ae_dev.name, mac_entry->addr); return -EINVAL; } @@ -1410,11 +1402,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev, /*chechk mac addr */ if (MAC_IS_ALL_ZEROS(mac_entry->addr)) { - dev_err(dsaf_dev->dev, - "set_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n", - mac_entry->addr[0], mac_entry->addr[1], - mac_entry->addr[2], mac_entry->addr[3], - mac_entry->addr[4], mac_entry->addr[5]); + dev_err(dsaf_dev->dev, "set_entry failed,addr %pM!\n", + mac_entry->addr); return -EINVAL; } @@ -1497,9 +1486,8 @@ int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id, /*check mac addr */ if (MAC_IS_ALL_ZEROS(addr) || MAC_IS_BROADCAST(addr)) { - dev_err(dsaf_dev->dev, - "del_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n", - addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); + dev_err(dsaf_dev->dev, "del_entry failed,addr %pM!\n", + addr); return -EINVAL; } @@ -1563,11 +1551,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev, /*check mac addr */ if (MAC_IS_ALL_ZEROS(mac_entry->addr)) { - dev_err(dsaf_dev->dev, - "del_port failed, addr %02x:%02x:%02x:%02x:%02x:%02x!\n", - mac_entry->addr[0], mac_entry->addr[1], - mac_entry->addr[2], mac_entry->addr[3], - mac_entry->addr[4], mac_entry->addr[5]); + dev_err(dsaf_dev->dev, "del_port failed, addr %pM!\n", + mac_entry->addr); return -EINVAL; } @@ -1644,11 +1629,8 @@ int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev, /* check macaddr */ if (MAC_IS_ALL_ZEROS(mac_entry->addr) || MAC_IS_BROADCAST(mac_entry->addr)) { - dev_err(dsaf_dev->dev, - "get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n", - mac_entry->addr[0], mac_entry->addr[1], - mac_entry->addr[2], mac_entry->addr[3], - mac_entry->addr[4], mac_entry->addr[5]); + dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n", + mac_entry->addr); return -EINVAL; } @@ -1695,11 +1677,8 @@ int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev, /*check mac addr */ if (MAC_IS_ALL_ZEROS(mac_entry->addr) || MAC_IS_BROADCAST(mac_entry->addr)) { - dev_err(dsaf_dev->dev, - "get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n", - mac_entry->addr[0], mac_entry->addr[1], - mac_entry->addr[2], mac_entry->addr[3], - 
mac_entry->addr[4], mac_entry->addr[5]); + dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n", + mac_entry->addr); return -EINVAL; } -- cgit v1.2.3 From 20b08e1a793d898f0f13040d5418ee0955f678cf Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Mon, 14 Dec 2015 13:51:51 +0100 Subject: net: phy: mdio-mux: Check return value of mdiobus_alloc() mdiobus_alloc() might return NULL, but its return value is not checked in mdio_mux_init(). This could potentially lead to a NULL pointer dereference. Fix it by checking the return value Fixes: 0ca2997d1452 ("netdev/of/phy: Add MDIO bus multiplexer support.") Signed-off-by: Tobias Klauser Signed-off-by: David S. Miller --- drivers/net/phy/mdio-mux.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c index 908e8d486342..7f8e7662e28c 100644 --- a/drivers/net/phy/mdio-mux.c +++ b/drivers/net/phy/mdio-mux.c @@ -149,9 +149,14 @@ int mdio_mux_init(struct device *dev, } cb->bus_number = v; cb->parent = pb; + cb->mii_bus = mdiobus_alloc(); + if (!cb->mii_bus) { + ret_val = -ENOMEM; + of_node_put(child_bus_node); + break; + } cb->mii_bus->priv = cb; - cb->mii_bus->irq = cb->phy_irq; cb->mii_bus->name = "mdio_mux"; snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%x.%x", -- cgit v1.2.3 From d856c16d8a2b5afbc28c130ce4f5a4acadb3021d Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Fri, 11 Dec 2015 18:03:49 +0800 Subject: stmmac: dwmac-sunxi: Call exit cleanup function in probe error path dwmac-sunxi has 2 callbacks that were called from stmmac_platform as part of the probe and remove sequences. Ater the conversion of dwmac-sunxi into a standalone platform driver, the .init function is called before calling into the stmmac driver core, but .exit is not called to clean up if stmmac returns an error. This patch fixes the probe error path. This properly cleans up and releases resources when the driver core fails to probe. Cc: Joachim Eastwood Fixes: 9a9e9a1edee8 ("stmmac: dwmac-sunxi: turn setup callback into a probe function") Signed-off-by: Chen-Yu Tsai Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c index 52b8ed9bd87c..adff46375a32 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c @@ -153,7 +153,11 @@ static int sun7i_gmac_probe(struct platform_device *pdev) if (ret) return ret; - return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) + sun7i_gmac_exit(pdev, plat_dat->bsp_priv); + + return ret; } static const struct of_device_id sun7i_dwmac_match[] = { -- cgit v1.2.3 From 2274d3753f6c5a885be4cfdf8b39ae2045ba6e30 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 13 Dec 2015 01:44:50 +0300 Subject: sh_eth: uninline sh_eth_{write|read}() Commit 3365711df024 ("sh_eth: WARN on access to a register not implemented in in a particular chip") added WARN_ON() to sh_eth_{read|write}(), thus making it unacceptable for these functions to be *inline* anymore. Remove *inline* and move the functions from the header to the driver itself. 
Below is our code economy with ARM gcc 4.7.3: $ size drivers/net/ethernet/renesas/sh_eth.o{~,} text data bss dec hex filename 32489 1140 0 33629 835d drivers/net/ethernet/renesas/sh_eth.o~ 25413 1140 0 26553 67b9 drivers/net/ethernet/renesas/sh_eth.o Suggested-by: Ben Hutchings Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 24 ++++++++++++++++++++++++ drivers/net/ethernet/renesas/sh_eth.h | 25 ------------------------- 2 files changed, 24 insertions(+), 25 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index b1ebd7c7408c..a5e3f9fb9448 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -52,6 +52,8 @@ NETIF_MSG_RX_ERR| \ NETIF_MSG_TX_ERR) +#define SH_ETH_OFFSET_INVALID ((u16)~0) + #define SH_ETH_OFFSET_DEFAULTS \ [0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID @@ -404,6 +406,28 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = { static void sh_eth_rcv_snd_disable(struct net_device *ndev); static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev); +static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u16 offset = mdp->reg_offset[enum_index]; + + if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) + return; + + iowrite32(data, mdp->addr + offset); +} + +static u32 sh_eth_read(struct net_device *ndev, int enum_index) +{ + struct sh_eth_private *mdp = netdev_priv(ndev); + u16 offset = mdp->reg_offset[enum_index]; + + if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) + return ~0U; + + return ioread32(mdp->addr + offset); +} + static bool sh_eth_is_gether(struct sh_eth_private *mdp) { return mdp->reg_offset == sh_eth_offset_gigabit; diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 50382b1c9ddc..26ad1cf0bcf1 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -546,31 +546,6 @@ static inline void sh_eth_soft_swap(char *src, int len) #endif } -#define SH_ETH_OFFSET_INVALID ((u16) ~0) - -static inline void sh_eth_write(struct net_device *ndev, u32 data, - int enum_index) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - u16 offset = mdp->reg_offset[enum_index]; - - if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) - return; - - iowrite32(data, mdp->addr + offset); -} - -static inline u32 sh_eth_read(struct net_device *ndev, int enum_index) -{ - struct sh_eth_private *mdp = netdev_priv(ndev); - u16 offset = mdp->reg_offset[enum_index]; - - if (WARN_ON(offset == SH_ETH_OFFSET_INVALID)) - return ~0U; - - return ioread32(mdp->addr + offset); -} - static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp, int enum_index) { -- cgit v1.2.3 From 3e2309937f1e5d538ff13da5fb8de41196927c61 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 13 Dec 2015 21:27:04 +0300 Subject: sh_eth: fix TX buffer byte-swapping For the little-endian SH771x kernels the driver has to byte-swap the RX/TX buffers, however yet unset physcial address from the TX descriptor is used to call sh_eth_soft_swap(). Use 'skb->data' instead... Fixes: 31fcb99d9958 ("net: sh_eth: remove __flush_purge_region") Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index a5e3f9fb9448..c97b5d865bdb 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2396,8 +2396,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) txdesc = &mdp->tx_ring[entry]; /* soft swap. */ if (!mdp->cd->hw_swap) - sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)), - skb->len + 2); + sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2); txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE); if (dma_mapping_error(&ndev->dev, txdesc->addr)) { -- cgit v1.2.3 From 1299653affa453bd0bdcd8112ffa392d4ba334e6 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 13 Dec 2015 23:05:07 +0300 Subject: sh_eth: fix descriptor access endianness The driver never calls cpu_to_edmac() when writing the descriptor address and edmac_to_cpu() when reading it, although it should -- fix this. Note that the frame/buffer length descriptor field accesses also need fixing but since they are both 16-bit we can't use {cpu|edmac}_to_{edmac|cpu}()... Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index c97b5d865bdb..a0eaf50499a2 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1196,7 +1196,7 @@ static void sh_eth_ring_format(struct net_device *ndev) break; } mdp->rx_skbuff[i] = skb; - rxdesc->addr = dma_addr; + rxdesc->addr = cpu_to_edmac(mdp, dma_addr); rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); /* Rx descriptor address set */ @@ -1427,7 +1427,8 @@ static int sh_eth_txfree(struct net_device *ndev) entry, edmac_to_cpu(mdp, txdesc->status)); /* Free the original skb. */ if (mdp->tx_skbuff[entry]) { - dma_unmap_single(&ndev->dev, txdesc->addr, + dma_unmap_single(&ndev->dev, + edmac_to_cpu(mdp, txdesc->addr), txdesc->buffer_length, DMA_TO_DEVICE); dev_kfree_skb_irq(mdp->tx_skbuff[entry]); mdp->tx_skbuff[entry] = NULL; @@ -1503,14 +1504,15 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) if (desc_status & RD_RFS10) ndev->stats.rx_over_errors++; } else if (skb) { + dma_addr = edmac_to_cpu(mdp, rxdesc->addr); if (!mdp->cd->hw_swap) sh_eth_soft_swap( - phys_to_virt(ALIGN(rxdesc->addr, 4)), + phys_to_virt(ALIGN(dma_addr, 4)), pkt_len + 2); mdp->rx_skbuff[entry] = NULL; if (mdp->cd->rpadir) skb_reserve(skb, NET_IP_ALIGN); - dma_unmap_single(&ndev->dev, rxdesc->addr, + dma_unmap_single(&ndev->dev, dma_addr, ALIGN(mdp->rx_buf_sz, 32), DMA_FROM_DEVICE); skb_put(skb, pkt_len); @@ -1547,7 +1549,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) mdp->rx_skbuff[entry] = skb; skb_checksum_none_assert(skb); - rxdesc->addr = dma_addr; + rxdesc->addr = cpu_to_edmac(mdp, dma_addr); } dma_wmb(); /* RACT bit must be set after all the above writes */ if (entry >= mdp->num_rx_ring - 1) @@ -2355,8 +2357,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev) /* Free all the skbuffs in the Rx queue. 
*/ for (i = 0; i < mdp->num_rx_ring; i++) { rxdesc = &mdp->rx_ring[i]; - rxdesc->status = 0; - rxdesc->addr = 0xBADF00D0; + rxdesc->status = cpu_to_edmac(mdp, 0); + rxdesc->addr = cpu_to_edmac(mdp, 0xBADF00D0); dev_kfree_skb(mdp->rx_skbuff[i]); mdp->rx_skbuff[i] = NULL; } @@ -2374,6 +2376,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); struct sh_eth_txdesc *txdesc; + dma_addr_t dma_addr; u32 entry; unsigned long flags; @@ -2397,12 +2400,13 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) /* soft swap. */ if (!mdp->cd->hw_swap) sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2); - txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len, - DMA_TO_DEVICE); - if (dma_mapping_error(&ndev->dev, txdesc->addr)) { + dma_addr = dma_map_single(&ndev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&ndev->dev, dma_addr)) { kfree_skb(skb); return NETDEV_TX_OK; } + txdesc->addr = cpu_to_edmac(mdp, dma_addr); txdesc->buffer_length = skb->len; dma_wmb(); /* TACT bit must be set after all the above writes */ -- cgit v1.2.3 From 54499969c94362d4bd28ff2999ffdf0b6f359586 Mon Sep 17 00:00:00 2001 From: Kazuya Mizuguchi Date: Mon, 14 Dec 2015 00:15:58 +0900 Subject: ravb: Add disable 10base Ethernet AVB does not support 10 Mbps transfer speed. Signed-off-by: Kazuya Mizuguchi Signed-off-by: Yoshihiro Kaneko Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/ravb_main.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index b69e0c249c4f..467d41698fd5 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -905,6 +905,9 @@ static int ravb_phy_init(struct net_device *ndev) netdev_info(ndev, "limited PHY to 100Mbit/s\n"); } + /* 10BASE is not supported */ + phydev->supported &= ~PHY_10BT_FEATURES; + netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n", phydev->addr, phydev->irq, phydev->drv->name); -- cgit v1.2.3 From 09ccfd238e5a0e670d8178cf50180ea81ae09ae1 Mon Sep 17 00:00:00 2001 From: WANG Cong Date: Mon, 14 Dec 2015 13:48:36 -0800 Subject: pptp: verify sockaddr_len in pptp_bind() and pptp_connect() Reported-by: Dmitry Vyukov Signed-off-by: Cong Wang Signed-off-by: David S. Miller --- drivers/net/ppp/pptp.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/net') diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index fc69e41d0950..597c53e0a2ec 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -419,6 +419,9 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr, struct pptp_opt *opt = &po->proto.pptp; int error = 0; + if (sockaddr_len < sizeof(struct sockaddr_pppox)) + return -EINVAL; + lock_sock(sk); opt->src_addr = sp->sa_addr.pptp; @@ -440,6 +443,9 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr, struct flowi4 fl4; int error = 0; + if (sockaddr_len < sizeof(struct sockaddr_pppox)) + return -EINVAL; + if (sp->sa_protocol != PX_PROTO_PPTP) return -EINVAL; -- cgit v1.2.3 From 2b2b31c845d3dec6f9960db92d0993ddfc2d2b7f Mon Sep 17 00:00:00 2001 From: Andrzej Hajda Date: Mon, 14 Dec 2015 11:05:58 +0100 Subject: net/mlx4_core: fix handling return value of mlx4_slave_convert_port The function can return negative values, so its result should be assigned to signed variable. 
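(A minimal sketch of the failure mode, with illustrative names rather than the driver's own: convert_port() stands in for mlx4_slave_convert_port(), and the destination is assumed to be a small unsigned field such as a u8 port number. Storing a negative error code in an unsigned field turns it into a large positive value, so a later "<= 0" check on that field can never fire.)

	u8 port;	/* unsigned destination field */
	int ret;

	/* buggy pattern: -EINVAL (-22) stored in a u8 becomes 234, so the
	 * error check below never triggers
	 */
	port = convert_port(dev, slave, port);
	if (port <= 0)
		return -EINVAL;

	/* fixed pattern: test the signed return value first, then store it */
	ret = convert_port(dev, slave, port);
	if (ret <= 0)
		return -EINVAL;
	port = ret;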
The problem has been detected using proposed semantic patch scripts/coccinelle/tests/assign_signed_to_unsigned.cocci [1]. [1]: http://permalink.gmane.org/gmane.linux.kernel/2046107 Fixes: fc48866f7 ('net/mlx4: Adapt code for N-Port VF') Signed-off-by: Andrzej Hajda Acked-by: Or Gerlitz Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 6fec3e993d02..cad6c44df91c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -4306,9 +4306,10 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, return -EOPNOTSUPP; ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; - ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port); - if (ctrl->port <= 0) + err = mlx4_slave_convert_port(dev, slave, ctrl->port); + if (err <= 0) return -EINVAL; + ctrl->port = err; qpn = be32_to_cpu(ctrl->qpn) & 0xffffff; err = get_res(dev, slave, qpn, RES_QP, &rqp); if (err) { -- cgit v1.2.3 From c7557e6a56510ff6636d40ad4ff64a3ef7d9e197 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 15 Dec 2015 13:12:29 +0300 Subject: amd-xgbe: fix a couple timeout loops At the end of the loop we test "if (!count)" but because "count--" is a post-op then the loop will end with count set to -1. I have fixed this by changing it to --count. Fixes: c5aa9e3b8156 ('amd-xgbe: Initial AMD 10GbE platform driver') Signed-off-by: Dan Carpenter Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 970781a9e677..f6a7161e3b85 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1849,7 +1849,7 @@ static int xgbe_exit(struct xgbe_prv_data *pdata) usleep_range(10, 15); /* Poll Until Poll Condition */ - while (count-- && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR)) + while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR)) usleep_range(500, 600); if (!count) @@ -1873,7 +1873,7 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata) /* Poll Until Poll Condition */ for (i = 0; i < pdata->tx_q_count; i++) { count = 2000; - while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i, + while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i, MTL_Q_TQOMR, FTQ)) usleep_range(500, 600); -- cgit v1.2.3 From 351434c6ba92e1fe7799a0c33c1412584a0fb3de Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 15 Dec 2015 13:52:36 +0300 Subject: qlge: fix a timeout loop in ql_change_rx_buffers() The problem here is that after the loop we test for "if (!i) " but because "i--" is a post-op we exit with i set to -1. I have fixed this by changing it to a pre-op instead. I had to change the starting value from 3 to 4 so that we still iterate 3 times. Signed-off-by: Dan Carpenter Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 02b7115b6aaa..997976426799 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -4211,8 +4211,9 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev) /* Wait for an outstanding reset to complete. */ if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) { - int i = 3; - while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { + int i = 4; + + while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) { netif_err(qdev, ifup, qdev->ndev, "Waiting for adapter UP...\n"); ssleep(1); -- cgit v1.2.3 From fe0be35e2cb6f8f43ae70ecc9fb372142fdf096b Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 15 Dec 2015 14:06:08 +0300 Subject: sfc: fix a timeout loop We test for if "tries" is zero at the end but "tries--" is a post-op so it will end with "tries" set to -1. I have changed it to a pre-op instead. Signed-off-by: Dan Carpenter Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/txc43128_phy.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/sfc/txc43128_phy.c b/drivers/net/ethernet/sfc/txc43128_phy.c index 3d5ee3259885..194f67d9f3bf 100644 --- a/drivers/net/ethernet/sfc/txc43128_phy.c +++ b/drivers/net/ethernet/sfc/txc43128_phy.c @@ -418,7 +418,7 @@ static void txc_reset_logic_mmd(struct efx_nic *efx, int mmd) val |= (1 << TXC_GLCMD_LMTSWRST_LBN); efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val); - while (tries--) { + while (--tries) { val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD); if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN))) break; -- cgit v1.2.3 From 389e4e04ad2d4887c7bdd7c01a93d3dfa5c14a06 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 15 Dec 2015 16:56:16 +0300 Subject: qlcnic: fix a timeout loop The problem here is that at the end of the loop we test for if idc->vnic_wait_limit is zero, but since idc->vnic_wait_limit-- is a post-op, it actually ends up set to (u8)-1. I have fixed this by moving the decrement inside the loop. Fixes: 486a5bc77a4a ('qlcnic: Add support for 83xx suspend and resume.') Signed-off-by: Dan Carpenter Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c index be7d7a62cc0d..b1a452f291ee 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c @@ -246,7 +246,8 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter) u32 state; state = QLCRDX(ahw, QLC_83XX_VNIC_STATE); - while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit--) { + while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit) { + idc->vnic_wait_limit--; msleep(1000); state = QLCRDX(ahw, QLC_83XX_VNIC_STATE); } -- cgit v1.2.3 From 7bff47da1ee23d00d1257905f2944c29594f799d Mon Sep 17 00:00:00 2001 From: Hamish Martin Date: Tue, 15 Dec 2015 14:14:50 +1300 Subject: gianfar: Don't enable RX Filer if not supported After commit 15bf176db1fb ("gianfar: Don't enable the Filer w/o the Parser"), 'TSEC' model controllers (for example as seen on MPC8541E) always have 8 bytes stripped from the front of received frames. 
Only 'eTSEC' gianfar controllers have the RX Filer capability (amongst other enhancements). Previously this was treated as always enabled for both 'TSEC' and 'eTSEC' controllers. In commit 15bf176db1fb ("gianfar: Don't enable the Filer w/o the Parser") a subtle change was made to the setting of 'uses_rxfcb' to effectively always set it (since 'rx_filer_enable' was always true). This had the side-effect of always stripping 8 bytes from the front of received frames on 'TSEC' type controllers. We now only enable the RX Filer capability on controller types that support it, thereby avoiding the issue for 'TSEC' type controllers. Reviewed-by: Chris Packham Reviewed-by: Mark Tomlinson Signed-off-by: Hamish Martin Reviewed-by: Claudiu Manoil Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/gianfar.c | 8 +++++--- drivers/net/ethernet/freescale/gianfar.h | 1 + 2 files changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 7cf898455e60..3e233d924cce 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -894,7 +894,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) FSL_GIANFAR_DEV_HAS_VLAN | FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | FSL_GIANFAR_DEV_HAS_EXTENDED_HASH | - FSL_GIANFAR_DEV_HAS_TIMER; + FSL_GIANFAR_DEV_HAS_TIMER | + FSL_GIANFAR_DEV_HAS_RX_FILER; err = of_property_read_string(np, "phy-connection-type", &ctype); @@ -1396,8 +1397,9 @@ static int gfar_probe(struct platform_device *ofdev) priv->rx_queue[i]->rxic = DEFAULT_RXIC; } - /* always enable rx filer */ - priv->rx_filer_enable = 1; + /* Always enable rx filer if available */ + priv->rx_filer_enable = + (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0; /* Enable most messages by default */ priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; /* use pritority h/w tx queue scheduling for single queue devices */ diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h index f266b20f9ef5..cb77667971a7 100644 --- a/drivers/net/ethernet/freescale/gianfar.h +++ b/drivers/net/ethernet/freescale/gianfar.h @@ -923,6 +923,7 @@ struct gfar { #define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400 #define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800 #define FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER 0x00001000 +#define FSL_GIANFAR_DEV_HAS_RX_FILER 0x00002000 #if (MAXGROUPS == 2) #define DEFAULT_MAPPING 0xAA -- cgit v1.2.3 From 79aa05a24f01bcb0cfbac3deea4f12d9b4f64ba9 Mon Sep 17 00:00:00 2001 From: Martin Roth Date: Tue, 15 Dec 2015 04:17:53 +0200 Subject: 82xx: FCC: Fixing a bug causing to FCC port lock-up The patch fixes FCC port lock-up, which occurs as a result of a bug during underrun/collision handling. Within the tx_startup() function in mac-fcc.c, the address of last BD is not calculated correctly. As a result of wrong calculation of the last BD address, the next transmitted BD may be set to an area out of the transmit BD ring. This actually causes to port lock-up and it is not recoverable. Signed-off-by: Martin Roth Signed-off-by: David S. 
From 67894eec3e27a5b61281cb68f63b933e8c111348 Mon Sep 17 00:00:00 2001 From: Iyappan Subramanian Date: Wed, 16 Dec 2015 22:26:05 -0800 Subject: drivers: net: xgene: fix Tx flow control Currently the Tx flow control is based on reading the hardware state, which is not accurate since it may not reflect descriptors that have not yet reached memory. To control the Tx flow accurately, change it to be software based. Signed-off-by: Iyappan Subramanian Signed-off-by: David S. Miller --- drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 38 ++++++++++++++---------- drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 4 +-- 2 files changed, 24 insertions(+), 18 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 9147a0107c44..d0ae1a6cc212 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -289,6 +289,7 @@ static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring, struct sk_buff *skb) { struct device *dev = ndev_to_dev(tx_ring->ndev); + struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev); struct xgene_enet_raw_desc *raw_desc; __le64 *exp_desc = NULL, *exp_bufs = NULL; dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr; @@ -419,6 +420,7 @@ out: raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) | SET_VAL(USERINFO, tx_ring->tail)); tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb; + pdata->tx_level += count; tx_ring->tail = tail; return count; @@ -429,14 +431,13 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb, { struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring; - struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring; - u32 tx_level, cq_level; + u32 tx_level = pdata->tx_level; int count; - tx_level = pdata->ring_ops->len(tx_ring); - cq_level = pdata->ring_ops->len(cp_ring); - if (unlikely(tx_level > pdata->tx_qcnt_hi || - cq_level > pdata->cp_qcnt_hi)) { + if (tx_level < pdata->txc_level) + tx_level += ((typeof(pdata->tx_level))~0U); + + if ((tx_level - pdata->txc_level) > pdata->tx_qcnt_hi) { netif_stop_queue(ndev); return NETDEV_TX_BUSY; } @@ -539,10 +540,13 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, struct xgene_enet_raw_desc *raw_desc, *exp_desc; u16 head = ring->head; u16 slots = ring->slots - 1; - int ret, count = 0, processed = 0; + int ret, desc_count, count = 0, processed = 0; + bool is_completion; do { raw_desc = &ring->raw_desc[head]; + desc_count = 0; + is_completion = false; exp_desc = NULL; if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc))) break; @@ -559,18 +563,24 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, } dma_rmb(); count++; + desc_count++; } - if (is_rx_desc(raw_desc)) + if (is_rx_desc(raw_desc)) { ret = xgene_enet_rx_frame(ring, raw_desc); - else + } else { ret = xgene_enet_tx_completion(ring, raw_desc); + is_completion = true; + } xgene_enet_mark_desc_slot_empty(raw_desc); if (exp_desc) xgene_enet_mark_desc_slot_empty(exp_desc); head = (head + 1) & slots; count++; + desc_count++; processed++; + if (is_completion) + pdata->txc_level += desc_count; if (ret) break; @@ -580,10 +590,8 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, pdata->ring_ops->wr_cmd(ring, -count); ring->head = head; - if (netif_queue_stopped(ring->ndev)) { - if (pdata->ring_ops->len(ring) < pdata->cp_qcnt_low) - netif_wake_queue(ring->ndev); - } + if (netif_queue_stopped(ring->ndev)) + netif_start_queue(ring->ndev); } return processed; @@ -1033,9 +1041,7 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev) pdata->tx_ring->cp_ring = cp_ring; pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); - pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2; - pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2; - pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2; + pdata->tx_qcnt_hi = pdata->tx_ring->slots - 128; return 0; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h index a6e56b88c0a0..1aa72c787f8d 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h @@ -155,11 +155,11 @@ struct xgene_enet_pdata { enum xgene_enet_id enet_id; struct xgene_enet_desc_ring *tx_ring; struct xgene_enet_desc_ring *rx_ring; + u16 tx_level; + u16 txc_level; char *dev_name; u32 rx_buff_cnt; u32 tx_qcnt_hi; - u32 cp_qcnt_hi; - u32 cp_qcnt_low; u32 rx_irq; u32 txc_irq; u8 cq_cnt; -- cgit v1.2.3
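The xgene patch above replaces hardware queue-length reads with two driver-owned counters: tx_level counts descriptors the transmit path has queued and txc_level counts descriptors seen completed, both u16 values that are allowed to wrap. Their unsigned difference is the number of descriptors still in flight, and transmission is stopped once that exceeds tx_qcnt_hi. A hedged user-space sketch of that accounting follows; the names mirror the patch, but the threshold and sample numbers are arbitrary, and the patch itself handles the wrap explicitly on a u32 value rather than relying on u16 arithmetic as done here:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TX_QCNT_HI 100                  /* illustrative high-water mark */

static uint16_t tx_level;               /* descriptors handed to hardware */
static uint16_t txc_level;              /* descriptors reported as completed */

/* Unsigned subtraction yields the in-flight count even after either
 * counter has wrapped past 65535.
 */
static uint16_t in_flight(void)
{
	return (uint16_t)(tx_level - txc_level);
}

static bool tx_should_stop(void)
{
	return in_flight() > TX_QCNT_HI;
}

int main(void)
{
	/* 65530 descriptors queued so far, 65500 completed: 30 in flight. */
	tx_level = 65530;
	txc_level = 65500;
	printf("in flight: %u, stop: %d\n", (unsigned)in_flight(), tx_should_stop());

	/* Queue 120 more; tx_level wraps past zero but the difference is still right. */
	tx_level = (uint16_t)(tx_level + 120);
	printf("in flight: %u, stop: %d\n", (unsigned)in_flight(), tx_should_stop());

	return 0;
}

Counting completions in software is also what lets the completion path restart the queue unconditionally, which is why the cp_qcnt_hi/cp_qcnt_low thresholds can be dropped.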
From 0f589967a73f1f30ab4ac4dd9ce0bb399b4d6357 Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Fri, 30 Oct 2015 15:16:01 +0000 Subject: xen-netback: don't use last request to determine minimum Tx credit The last request transmitted by the guest gives no indication of the minimum amount of credit that the guest might need to send a packet, since the last packet might have been a small one. Instead, allow for the worst-case 128 KiB packet. This is part of XSA155. CC: stable@vger.kernel.org Reviewed-by: Wei Liu Signed-off-by: David Vrabel Signed-off-by: Konrad Rzeszutek Wilk --- drivers/net/xen-netback/netback.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index e481f3710bd3..b683581c5d64 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -679,9 +679,7 @@ static void tx_add_credit(struct xenvif_queue *queue) * Allow a burst big enough to transmit a jumbo packet of up to 128kB. * Otherwise the interface can seize up due to insufficient credit. */ - max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size; - max_burst = min(max_burst, 131072UL); - max_burst = max(max_burst, queue->credit_bytes); + max_burst = max(131072UL, queue->credit_bytes); /* Take care that adding a new chunk of credit doesn't wrap to zero.
*/ max_credit = queue->remaining_credit + queue->credit_bytes; -- cgit v1.2.3 From 68a33bfd8403e4e22847165d149823a2e0e67c9c Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Fri, 30 Oct 2015 15:17:06 +0000 Subject: xen-netback: use RING_COPY_REQUEST() throughout Instead of open-coding memcpy()s and directly accessing Tx and Rx requests, use the new RING_COPY_REQUEST() that ensures the local copy is correct. This is more than is strictly necessary for guest Rx requests since only the id and gref fields are used and it is harmless if the frontend modifies these. This is part of XSA155. CC: stable@vger.kernel.org Reviewed-by: Wei Liu Signed-off-by: David Vrabel Signed-off-by: Konrad Rzeszutek Wilk --- drivers/net/xen-netback/netback.c | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) (limited to 'drivers/net') diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index b683581c5d64..1049c34e7d43 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -258,18 +258,18 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue, struct netrx_pending_operations *npo) { struct xenvif_rx_meta *meta; - struct xen_netif_rx_request *req; + struct xen_netif_rx_request req; - req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); + RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); meta = npo->meta + npo->meta_prod++; meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; meta->gso_size = 0; meta->size = 0; - meta->id = req->id; + meta->id = req.id; npo->copy_off = 0; - npo->copy_gref = req->gref; + npo->copy_gref = req.gref; return meta; } @@ -424,7 +424,7 @@ static int xenvif_gop_skb(struct sk_buff *skb, struct xenvif *vif = netdev_priv(skb->dev); int nr_frags = skb_shinfo(skb)->nr_frags; int i; - struct xen_netif_rx_request *req; + struct xen_netif_rx_request req; struct xenvif_rx_meta *meta; unsigned char *data; int head = 1; @@ -443,15 +443,15 @@ static int xenvif_gop_skb(struct sk_buff *skb, /* Set up a GSO prefix descriptor, if necessary */ if ((1 << gso_type) & vif->gso_prefix_mask) { - req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); + RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); meta = npo->meta + npo->meta_prod++; meta->gso_type = gso_type; meta->gso_size = skb_shinfo(skb)->gso_size; meta->size = 0; - meta->id = req->id; + meta->id = req.id; } - req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); + RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req); meta = npo->meta + npo->meta_prod++; if ((1 << gso_type) & vif->gso_mask) { @@ -463,9 +463,9 @@ static int xenvif_gop_skb(struct sk_buff *skb, } meta->size = 0; - meta->id = req->id; + meta->id = req.id; npo->copy_off = 0; - npo->copy_gref = req->gref; + npo->copy_gref = req.gref; data = skb->data; while (data < skb_tail_pointer(skb)) { @@ -709,7 +709,7 @@ static void xenvif_tx_err(struct xenvif_queue *queue, spin_unlock_irqrestore(&queue->response_lock, flags); if (cons == end) break; - txp = RING_GET_REQUEST(&queue->tx, cons++); + RING_COPY_REQUEST(&queue->tx, cons++, txp); } while (1); queue->tx.req_cons = cons; } @@ -776,8 +776,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue, if (drop_err) txp = &dropped_tx; - memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots), - sizeof(*txp)); + RING_COPY_REQUEST(&queue->tx, cons + slots, txp); /* If the guest submitted a frame >= 64 KiB then * first->size overflowed and following slots will @@ -1110,8 +1109,7 @@ static int 
xenvif_get_extras(struct xenvif_queue *queue, return -EBADR; } - memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons), - sizeof(extra)); + RING_COPY_REQUEST(&queue->tx, cons, &extra); if (unlikely(!extra.type || extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { queue->tx.req_cons = ++cons; @@ -1320,7 +1318,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, idx = queue->tx.req_cons; rmb(); /* Ensure that we see the request before we copy it. */ - memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq)); + RING_COPY_REQUEST(&queue->tx, idx, &txreq); /* Credit-based scheduling. */ if (txreq.size > queue->remaining_credit && -- cgit v1.2.3
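For the RING_COPY_REQUEST() conversion above, the underlying hazard is a double fetch: with RING_GET_REQUEST() the backend keeps a pointer into a ring page that the guest can still write, so a field read once for validation and again for use may change in between. Copying the whole request into backend-local memory first means every later check and use operates on one consistent snapshot. The following is a simplified, self-contained illustration of the two patterns; struct tx_request, the 4096-byte limit, and the helper names are made up for the example and are not the Xen ring macros or netif structures:

#include <stdint.h>

/* Illustrative stand-in for a request slot living in guest-shared memory. */
struct tx_request {
	uint32_t gref;
	uint16_t offset;
	uint16_t flags;
	uint16_t id;
	uint16_t size;
};

/* Unsafe pattern: validate and use through a pointer into shared memory.
 * A malicious guest can change shared->size between the check and the use.
 */
static int process_unsafe(volatile struct tx_request *shared)
{
	if (shared->size > 4096)        /* first fetch: the check */
		return -1;
	return shared->size;            /* second fetch: may now be a different value */
}

/* Safe pattern, which is what RING_COPY_REQUEST() boils down to: snapshot
 * the request into local memory first (the real macro does this with a
 * memcpy), then check and use only the local copy.
 */
static int process_safe(volatile struct tx_request *shared)
{
	struct tx_request req = *shared;

	if (req.size > 4096)
		return -1;
	return req.size;
}

int main(void)
{
	struct tx_request slot = { .gref = 1, .id = 7, .size = 1500 };

	/* With a well-behaved "guest" both paths agree; the difference only
	 * matters when the shared slot is modified concurrently.
	 */
	return (process_unsafe(&slot) == 1500 && process_safe(&slot) == 1500) ? 0 : 1;
}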