Diffstat (limited to 'drivers/net/ethernet')
36 files changed, 458 insertions, 264 deletions
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c index 08d11cede9c9..f5b237e0bd60 100644 --- a/drivers/net/ethernet/amazon/ena/ena_com.c +++ b/drivers/net/ethernet/amazon/ena/ena_com.c @@ -61,6 +61,8 @@ #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF +#define ENA_REGS_ADMIN_INTR_MASK 1 + /*****************************************************************************/ /*****************************************************************************/ /*****************************************************************************/ @@ -232,11 +234,9 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu tail_masked = admin_queue->sq.tail & queue_size_mask; /* In case of queue FULL */ - cnt = admin_queue->sq.tail - admin_queue->sq.head; + cnt = atomic_read(&admin_queue->outstanding_cmds); if (cnt >= admin_queue->q_depth) { - pr_debug("admin queue is FULL (tail %d head %d depth: %d)\n", - admin_queue->sq.tail, admin_queue->sq.head, - admin_queue->q_depth); + pr_debug("admin queue is full.\n"); admin_queue->stats.out_of_space++; return ERR_PTR(-ENOSPC); } @@ -508,15 +508,20 @@ static int ena_com_comp_status_to_errno(u8 comp_status) static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx, struct ena_com_admin_queue *admin_queue) { - unsigned long flags; - u32 start_time; + unsigned long flags, timeout; int ret; - start_time = ((u32)jiffies_to_usecs(jiffies)); + timeout = jiffies + ADMIN_CMD_TIMEOUT_US; + + while (1) { + spin_lock_irqsave(&admin_queue->q_lock, flags); + ena_com_handle_admin_completion(admin_queue); + spin_unlock_irqrestore(&admin_queue->q_lock, flags); + + if (comp_ctx->status != ENA_CMD_SUBMITTED) + break; - while (comp_ctx->status == ENA_CMD_SUBMITTED) { - if ((((u32)jiffies_to_usecs(jiffies)) - start_time) > - ADMIN_CMD_TIMEOUT_US) { + if (time_is_before_jiffies(timeout)) { pr_err("Wait for completion (polling) timeout\n"); /* ENA didn't have any completion */ spin_lock_irqsave(&admin_queue->q_lock, flags); @@ -528,10 +533,6 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c goto err; } - spin_lock_irqsave(&admin_queue->q_lock, flags); - ena_com_handle_admin_completion(admin_queue); - spin_unlock_irqrestore(&admin_queue->q_lock, flags); - msleep(100); } @@ -1455,6 +1456,12 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev) void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling) { + u32 mask_value = 0; + + if (polling) + mask_value = ENA_REGS_ADMIN_INTR_MASK; + + writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF); ena_dev->admin_queue.polling = polling; } diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 67b2338f8fb3..3ee55e2fd694 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -80,7 +80,6 @@ static const struct ena_stats ena_stats_tx_strings[] = { ENA_STAT_TX_ENTRY(tx_poll), ENA_STAT_TX_ENTRY(doorbells), ENA_STAT_TX_ENTRY(prepare_ctx_err), - ENA_STAT_TX_ENTRY(missing_tx_comp), ENA_STAT_TX_ENTRY(bad_req_id), }; @@ -94,6 +93,7 @@ static const struct ena_stats ena_stats_rx_strings[] = { ENA_STAT_RX_ENTRY(dma_mapping_err), ENA_STAT_RX_ENTRY(bad_desc_num), ENA_STAT_RX_ENTRY(rx_copybreak_pkt), + ENA_STAT_RX_ENTRY(empty_rx_ring), }; static const struct ena_stats ena_stats_ena_com_strings[] = { diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c 
b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 7c1214d78855..4f16ed38bcf3 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -190,6 +190,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter) rxr->sgl_size = adapter->max_rx_sgl_size; rxr->smoothed_interval = ena_com_get_nonadaptive_moderation_interval_rx(ena_dev); + rxr->empty_rx_queue = 0; } } @@ -1078,6 +1079,26 @@ inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring, rx_ring->per_napi_bytes = 0; } +static inline void ena_unmask_interrupt(struct ena_ring *tx_ring, + struct ena_ring *rx_ring) +{ + struct ena_eth_io_intr_reg intr_reg; + + /* Update intr register: rx intr delay, + * tx intr delay and interrupt unmask + */ + ena_com_update_intr_reg(&intr_reg, + rx_ring->smoothed_interval, + tx_ring->smoothed_interval, + true); + + /* It is a shared MSI-X. + * Tx and Rx CQ have pointer to it. + * So we use one of them to reach the intr reg + */ + ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg); +} + static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring, struct ena_ring *rx_ring) { @@ -1108,7 +1129,6 @@ static int ena_io_poll(struct napi_struct *napi, int budget) { struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi); struct ena_ring *tx_ring, *rx_ring; - struct ena_eth_io_intr_reg intr_reg; u32 tx_work_done; u32 rx_work_done; @@ -1149,22 +1169,9 @@ static int ena_io_poll(struct napi_struct *napi, int budget) if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) ena_adjust_intr_moderation(rx_ring, tx_ring); - /* Update intr register: rx intr delay, - * tx intr delay and interrupt unmask - */ - ena_com_update_intr_reg(&intr_reg, - rx_ring->smoothed_interval, - tx_ring->smoothed_interval, - true); - - /* It is a shared MSI-X. - * Tx and Rx CQ have pointer to it. - * So we use one of them to reach the intr reg - */ - ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg); + ena_unmask_interrupt(tx_ring, rx_ring); } - ena_update_ring_numa_node(tx_ring, rx_ring); ret = rx_work_done; @@ -1485,6 +1492,11 @@ static int ena_up_complete(struct ena_adapter *adapter) ena_napi_enable_all(adapter); + /* Enable completion queues interrupt */ + for (i = 0; i < adapter->num_queues; i++) + ena_unmask_interrupt(&adapter->tx_ring[i], + &adapter->rx_ring[i]); + /* schedule napi in case we had pending packets * from the last time we disable napi */ @@ -1532,6 +1544,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid) "Failed to get TX queue handlers. TX queue num %d rc: %d\n", qid, rc); ena_com_destroy_io_queue(ena_dev, ena_qid); + return rc; } ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node); @@ -1596,6 +1609,7 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid) "Failed to get RX queue handlers. RX queue num %d rc: %d\n", qid, rc); ena_com_destroy_io_queue(ena_dev, ena_qid); + return rc; } ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node); @@ -1981,6 +1995,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev) tx_info->tx_descs = nb_hw_desc; tx_info->last_jiffies = jiffies; + tx_info->print_once = 0; tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, tx_ring->ring_size); @@ -2550,13 +2565,44 @@ err: "Reset attempt failed. 
Can not reset the device\n"); } -static void check_for_missing_tx_completions(struct ena_adapter *adapter) +static int check_missing_comp_in_queue(struct ena_adapter *adapter, + struct ena_ring *tx_ring) { struct ena_tx_buffer *tx_buf; unsigned long last_jiffies; + u32 missed_tx = 0; + int i; + + for (i = 0; i < tx_ring->ring_size; i++) { + tx_buf = &tx_ring->tx_buffer_info[i]; + last_jiffies = tx_buf->last_jiffies; + if (unlikely(last_jiffies && + time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) { + if (!tx_buf->print_once) + netif_notice(adapter, tx_err, adapter->netdev, + "Found a Tx that wasn't completed on time, qid %d, index %d.\n", + tx_ring->qid, i); + + tx_buf->print_once = 1; + missed_tx++; + + if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) { + netif_err(adapter, tx_err, adapter->netdev, + "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n", + missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS); + set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); + return -EIO; + } + } + } + + return 0; +} + +static void check_for_missing_tx_completions(struct ena_adapter *adapter) +{ struct ena_ring *tx_ring; - int i, j, budget; - u32 missed_tx; + int i, budget, rc; /* Make sure the driver doesn't turn the device in other process */ smp_rmb(); @@ -2572,31 +2618,9 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter) for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) { tx_ring = &adapter->tx_ring[i]; - for (j = 0; j < tx_ring->ring_size; j++) { - tx_buf = &tx_ring->tx_buffer_info[j]; - last_jiffies = tx_buf->last_jiffies; - if (unlikely(last_jiffies && time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) { - netif_notice(adapter, tx_err, adapter->netdev, - "Found a Tx that wasn't completed on time, qid %d, index %d.\n", - tx_ring->qid, j); - - u64_stats_update_begin(&tx_ring->syncp); - missed_tx = tx_ring->tx_stats.missing_tx_comp++; - u64_stats_update_end(&tx_ring->syncp); - - /* Clear last jiffies so the lost buffer won't - * be counted twice. - */ - tx_buf->last_jiffies = 0; - - if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) { - netif_err(adapter, tx_err, adapter->netdev, - "The number of lost tx completion is above the threshold (%d > %d). Reset the device\n", - missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS); - set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); - } - } - } + rc = check_missing_comp_in_queue(adapter, tx_ring); + if (unlikely(rc)) + return; budget--; if (!budget) @@ -2606,6 +2630,58 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter) adapter->last_monitored_tx_qid = i % adapter->num_queues; } +/* trigger napi schedule after 2 consecutive detections */ +#define EMPTY_RX_REFILL 2 +/* For the rare case where the device runs out of Rx descriptors and the + * napi handler failed to refill new Rx descriptors (due to a lack of memory + * for example). + * This case will lead to a deadlock: + * The device won't send interrupts since all the new Rx packets will be dropped + * The napi handler won't allocate new Rx descriptors so the device won't be + * able to send new packets. + * + * This scenario can happen when the kernel's vm.min_free_kbytes is too small. + * It is recommended to have at least 512MB, with a minimum of 128MB for + * constrained environments.
+ * + * When such a situation is detected - Reschedule napi + */ +static void check_for_empty_rx_ring(struct ena_adapter *adapter) +{ + struct ena_ring *rx_ring; + int i, refill_required; + + if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) + return; + + if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) + return; + + for (i = 0; i < adapter->num_queues; i++) { + rx_ring = &adapter->rx_ring[i]; + + refill_required = + ena_com_sq_empty_space(rx_ring->ena_com_io_sq); + if (unlikely(refill_required == (rx_ring->ring_size - 1))) { + rx_ring->empty_rx_queue++; + + if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { + u64_stats_update_begin(&rx_ring->syncp); + rx_ring->rx_stats.empty_rx_ring++; + u64_stats_update_end(&rx_ring->syncp); + + netif_err(adapter, drv, adapter->netdev, + "trigger refill for ring %d\n", i); + + napi_schedule(rx_ring->napi); + rx_ring->empty_rx_queue = 0; + } + } else { + rx_ring->empty_rx_queue = 0; + } + } +} + /* Check for keep alive expiration */ static void check_for_missing_keep_alive(struct ena_adapter *adapter) { @@ -2660,6 +2736,8 @@ static void ena_timer_service(unsigned long data) check_for_missing_tx_completions(adapter); + check_for_empty_rx_ring(adapter); + if (debug_area) ena_dump_stats_to_buf(adapter, debug_area); @@ -2840,6 +2918,11 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) { int release_bars; + if (ena_dev->mem_bar) + devm_iounmap(&pdev->dev, ena_dev->mem_bar); + + devm_iounmap(&pdev->dev, ena_dev->reg_bar); + release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; pci_release_selected_regions(pdev, release_bars); } @@ -2927,8 +3010,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_free_ena_dev; } - ena_dev->reg_bar = ioremap(pci_resource_start(pdev, ENA_REG_BAR), - pci_resource_len(pdev, ENA_REG_BAR)); + ena_dev->reg_bar = devm_ioremap(&pdev->dev, + pci_resource_start(pdev, ENA_REG_BAR), + pci_resource_len(pdev, ENA_REG_BAR)); if (!ena_dev->reg_bar) { dev_err(&pdev->dev, "failed to remap regs bar\n"); rc = -EFAULT; @@ -2948,8 +3032,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ena_set_push_mode(pdev, ena_dev, &get_feat_ctx); if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { - ena_dev->mem_bar = ioremap_wc(pci_resource_start(pdev, ENA_MEM_BAR), - pci_resource_len(pdev, ENA_MEM_BAR)); + ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev, + pci_resource_start(pdev, ENA_MEM_BAR), + pci_resource_len(pdev, ENA_MEM_BAR)); if (!ena_dev->mem_bar) { rc = -EFAULT; goto err_device_destroy; diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 0e22bce6239d..a4d3d5e21068 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h @@ -45,7 +45,7 @@ #define DRV_MODULE_VER_MAJOR 1 #define DRV_MODULE_VER_MINOR 1 -#define DRV_MODULE_VER_SUBMINOR 2 +#define DRV_MODULE_VER_SUBMINOR 7 #define DRV_MODULE_NAME "ena" #ifndef DRV_MODULE_VERSION @@ -146,7 +146,18 @@ struct ena_tx_buffer { u32 tx_descs; /* num of buffers used by this skb */ u32 num_of_bufs; - /* Save the last jiffies to detect missing tx packets */ + + /* Used when detecting missing tx packets, to limit the number of prints */ + u32 print_once; + /* Save the last jiffies to detect missing tx packets + * + * Set to a non-zero value in ena_start_xmit and set to zero in + * napi and the timer service routine.
+ * + * while this value is not protected by lock, + * a given packet is not expected to be handled by ena_start_xmit + * and by napi/timer_service at the same time. + */ unsigned long last_jiffies; struct ena_com_buf bufs[ENA_PKT_MAX_BUFS]; } ____cacheline_aligned; @@ -170,7 +181,6 @@ struct ena_stats_tx { u64 napi_comp; u64 tx_poll; u64 doorbells; - u64 missing_tx_comp; u64 bad_req_id; }; @@ -184,6 +194,7 @@ struct ena_stats_rx { u64 dma_mapping_err; u64 bad_desc_num; u64 rx_copybreak_pkt; + u64 empty_rx_ring; }; struct ena_ring { @@ -231,6 +242,7 @@ struct ena_ring { struct ena_stats_tx tx_stats; struct ena_stats_rx rx_stats; }; + int empty_rx_queue; } ____cacheline_aligned; struct ena_stats_dev { diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h index b8e3d88f0879..a66aee51ab5b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h @@ -193,9 +193,6 @@ int hw_atl_utils_hw_get_regs(struct aq_hw_s *self, struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff); -int hw_atl_utils_hw_get_settings(struct aq_hw_s *self, - struct ethtool_cmd *cmd); - int hw_atl_utils_hw_set_power(struct aq_hw_s *self, unsigned int power_state); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 5f49334dcad5..f619c4cac51f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -3883,15 +3883,26 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) /* when transmitting in a vf, start bd must hold the ethertype * for fw to enforce it */ + u16 vlan_tci = 0; #ifndef BNX2X_STOP_ON_ERROR - if (IS_VF(bp)) + if (IS_VF(bp)) { #endif - tx_start_bd->vlan_or_ethertype = - cpu_to_le16(ntohs(eth->h_proto)); + /* Still need to consider inband vlan for enforced */ + if (__vlan_get_tag(skb, &vlan_tci)) { + tx_start_bd->vlan_or_ethertype = + cpu_to_le16(ntohs(eth->h_proto)); + } else { + tx_start_bd->bd_flags.as_bitfield |= + (X_ETH_INBAND_VLAN << + ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); + tx_start_bd->vlan_or_ethertype = + cpu_to_le16(vlan_tci); + } #ifndef BNX2X_STOP_ON_ERROR - else + } else { /* used by FW for packet accounting */ tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); + } #endif } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index bdfd53b46bc5..9ca994d0bab6 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -901,6 +901,8 @@ static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf) /* release VF resources */ bnx2x_vf_free_resc(bp, vf); + vf->malicious = false; + /* re-open the mailbox */ bnx2x_vf_enable_mbx(bp, vf->abs_vfid); return; @@ -1822,9 +1824,11 @@ get_vf: vf->abs_vfid, qidx); bnx2x_vf_handle_rss_update_eqe(bp, vf); case EVENT_RING_OPCODE_VF_FLR: - case EVENT_RING_OPCODE_MALICIOUS_VF: /* Do nothing for now */ return 0; + case EVENT_RING_OPCODE_MALICIOUS_VF: + vf->malicious = true; + return 0; } return 0; @@ -1905,6 +1909,13 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) continue; } + if (vf->malicious) { + DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), + "vf %d malicious so no stats for it\n", + vf->abs_vfid); + continue; + } + DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS), "add addresses for vf %d\n", vf->abs_vfid); for_each_vfq(vf, j) { @@ -3042,7 
+3053,7 @@ void bnx2x_vf_pci_dealloc(struct bnx2x *bp) { BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, sizeof(struct bnx2x_vf_mbx_msg)); - BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, + BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping, sizeof(union pf_vf_bulletin)); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h index 888d0b6632e8..53466f6cebab 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h @@ -141,6 +141,7 @@ struct bnx2x_virtf { #define VF_RESET 3 /* VF FLR'd, pending cleanup */ bool flr_clnup_stage; /* true during flr cleanup */ + bool malicious; /* true if FW indicated so, until FLR */ /* dma */ dma_addr_t fw_stat_map; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 77ed2f628f9c..53309f659951 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2171,9 +2171,10 @@ static int cxgb_up(struct adapter *adap) { int err; + mutex_lock(&uld_mutex); err = setup_sge_queues(adap); if (err) - goto out; + goto rel_lock; err = setup_rss(adap); if (err) goto freeq; @@ -2197,7 +2198,6 @@ static int cxgb_up(struct adapter *adap) goto irq_err; } - mutex_lock(&uld_mutex); enable_rx(adap); t4_sge_start(adap); t4_intr_enable(adap); @@ -2210,13 +2210,15 @@ static int cxgb_up(struct adapter *adap) #endif /* Initialize hash mac addr list*/ INIT_LIST_HEAD(&adap->mac_hlist); - out: return err; + irq_err: dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); freeq: t4_free_sge_resources(adap); - goto out; + rel_lock: + mutex_unlock(&uld_mutex); + return err; } static void cxgb_down(struct adapter *adapter) @@ -4525,7 +4527,7 @@ static void dummy_setup(struct net_device *dev) /* Initialize the device structure. 
*/ dev->netdev_ops = &cxgb4_mgmt_netdev_ops; dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops; - dev->destructor = free_netdev; + dev->needs_free_netdev = true; } static int config_mgmt_dev(struct pci_dev *pdev) diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 9a520e4f0df9..290ad0563320 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2647,7 +2647,7 @@ static int dpaa_eth_probe(struct platform_device *pdev) priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */ /* device used for DMA mapping */ - arch_setup_dma_ops(dev, 0, 0, NULL, false); + set_dma_ops(dev, get_dma_ops(&pdev->dev)); err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40)); if (err) { dev_err(dev, "dma_coerce_mask_and_coherent() failed\n"); diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 0b31f8502ada..6e67d22fd0d5 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -623,6 +623,8 @@ static struct platform_device *dpaa_eth_add_device(int fman_id, goto no_mem; } + set_dma_ops(&pdev->dev, get_dma_ops(priv->dev)); + ret = platform_device_add_data(pdev, &data, sizeof(data)); if (ret) goto err; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index b8fab149690f..e95795b3c841 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -288,9 +288,15 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en) /* Force 1000M Link, Default is 0x0200 */ phy_write(phy_dev, 7, 0x20C); - phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); - /* Enable PHY loop-back */ + /* Powerup Fiber */ + phy_write(phy_dev, HNS_PHY_PAGE_REG, 1); + val = phy_read(phy_dev, COPPER_CONTROL_REG); + val &= ~PHY_POWER_DOWN; + phy_write(phy_dev, COPPER_CONTROL_REG, val); + + /* Enable Phy Loopback */ + phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); val = phy_read(phy_dev, COPPER_CONTROL_REG); val |= PHY_LOOP_BACK; val &= ~PHY_POWER_DOWN; @@ -299,6 +305,12 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en) phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA); phy_write(phy_dev, 1, 0x400); phy_write(phy_dev, 7, 0x200); + + phy_write(phy_dev, HNS_PHY_PAGE_REG, 1); + val = phy_read(phy_dev, COPPER_CONTROL_REG); + val |= PHY_POWER_DOWN; + phy_write(phy_dev, COPPER_CONTROL_REG, val); + phy_write(phy_dev, HNS_PHY_PAGE_REG, 0); phy_write(phy_dev, 9, 0xF00); diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 508923f39ccf..259e69a52ec5 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -343,6 +343,7 @@ static int emac_reset(struct emac_instance *dev) { struct emac_regs __iomem *p = dev->emacp; int n = 20; + bool __maybe_unused try_internal_clock = false; DBG(dev, "reset" NL); @@ -355,6 +356,7 @@ static int emac_reset(struct emac_instance *dev) } #ifdef CONFIG_PPC_DCR_NATIVE +do_retry: /* * PPC460EX/GT Embedded Processor Advanced User's Manual * section 28.10.1 Mode Register 0 (EMACx_MR0) states: @@ -362,10 +364,19 @@ static int emac_reset(struct emac_instance *dev) * of the EMAC. If none is present, select the internal clock * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1). * After a soft reset, select the external clock. 
+ * + * The AR8035-A PHY Meraki MR24 does not provide a TX Clk if the + * ethernet cable is not attached. This causes the reset to timeout + * and the PHY detection code in emac_init_phy() is unable to + * communicate and detect the AR8035-A PHY. As a result, the emac + * driver bails out early and the user has no ethernet. + * In order to stay compatible with existing configurations, the + * driver will temporarily switch to the internal clock, after + * the first reset fails. */ if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { - if (dev->phy_address == 0xffffffff && - dev->phy_map == 0xffffffff) { + if (try_internal_clock || (dev->phy_address == 0xffffffff && + dev->phy_map == 0xffffffff)) { /* No PHY: select internal loop clock before reset */ dcri_clrset(SDR0, SDR0_ETH_CFG, 0, SDR0_ETH_CFG_ECS << dev->cell_index); @@ -383,8 +394,15 @@ static int emac_reset(struct emac_instance *dev) #ifdef CONFIG_PPC_DCR_NATIVE if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { - if (dev->phy_address == 0xffffffff && - dev->phy_map == 0xffffffff) { + if (!n && !try_internal_clock) { + /* first attempt has timed out. */ + n = 20; + try_internal_clock = true; + goto do_retry; + } + + if (try_internal_clock || (dev->phy_address == 0xffffffff && + dev->phy_map == 0xffffffff)) { /* No PHY: restore external clock source after reset */ dcri_clrset(SDR0, SDR0_ETH_CFG, SDR0_ETH_CFG_ECS << dev->cell_index, 0); @@ -2460,20 +2478,24 @@ static int emac_mii_bus_reset(struct mii_bus *bus) return emac_reset(dev); } +static int emac_mdio_phy_start_aneg(struct mii_phy *phy, + struct phy_device *phy_dev) +{ + phy_dev->autoneg = phy->autoneg; + phy_dev->speed = phy->speed; + phy_dev->duplex = phy->duplex; + phy_dev->advertising = phy->advertising; + return phy_start_aneg(phy_dev); +} + static int emac_mdio_setup_aneg(struct mii_phy *phy, u32 advertise) { struct net_device *ndev = phy->dev; struct emac_instance *dev = netdev_priv(ndev); - dev->phy.autoneg = AUTONEG_ENABLE; - dev->phy.speed = SPEED_1000; - dev->phy.duplex = DUPLEX_FULL; - dev->phy.advertising = advertise; phy->autoneg = AUTONEG_ENABLE; - phy->speed = dev->phy.speed; - phy->duplex = dev->phy.duplex; phy->advertising = advertise; - return phy_start_aneg(dev->phy_dev); + return emac_mdio_phy_start_aneg(phy, dev->phy_dev); } static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd) @@ -2481,13 +2503,10 @@ static int emac_mdio_setup_forced(struct mii_phy *phy, int speed, int fd) struct net_device *ndev = phy->dev; struct emac_instance *dev = netdev_priv(ndev); - dev->phy.autoneg = AUTONEG_DISABLE; - dev->phy.speed = speed; - dev->phy.duplex = fd; phy->autoneg = AUTONEG_DISABLE; phy->speed = speed; phy->duplex = fd; - return phy_start_aneg(dev->phy_dev); + return emac_mdio_phy_start_aneg(phy, dev->phy_dev); } static int emac_mdio_poll_link(struct mii_phy *phy) @@ -2509,16 +2528,17 @@ static int emac_mdio_read_link(struct mii_phy *phy) { struct net_device *ndev = phy->dev; struct emac_instance *dev = netdev_priv(ndev); + struct phy_device *phy_dev = dev->phy_dev; int res; - res = phy_read_status(dev->phy_dev); + res = phy_read_status(phy_dev); if (res) return res; - dev->phy.speed = phy->speed; - dev->phy.duplex = phy->duplex; - dev->phy.pause = phy->pause; - dev->phy.asym_pause = phy->asym_pause; + phy->speed = phy_dev->speed; + phy->duplex = phy_dev->duplex; + phy->pause = phy_dev->pause; + phy->asym_pause = phy_dev->asym_pause; return 0; } @@ -2528,13 +2548,6 @@ static int emac_mdio_init_phy(struct mii_phy *phy) struct 
emac_instance *dev = netdev_priv(ndev); phy_start(dev->phy_dev); - dev->phy.autoneg = phy->autoneg; - dev->phy.speed = phy->speed; - dev->phy.duplex = phy->duplex; - dev->phy.advertising = phy->advertising; - dev->phy.pause = phy->pause; - dev->phy.asym_pause = phy->asym_pause; - return phy_init_hw(dev->phy_dev); } diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index a93757c255f7..c0fbeb387db4 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1468,6 +1468,11 @@ static void ibmvnic_netpoll_controller(struct net_device *dev) } #endif +static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu) +{ + return -EOPNOTSUPP; +} + static const struct net_device_ops ibmvnic_netdev_ops = { .ndo_open = ibmvnic_open, .ndo_stop = ibmvnic_close, @@ -1479,6 +1484,7 @@ static const struct net_device_ops ibmvnic_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ibmvnic_netpoll_controller, #endif + .ndo_change_mtu = ibmvnic_change_mtu, }; /* ethtool functions */ diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index cdde3cc28fb5..44d9610f7a15 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -399,6 +399,7 @@ struct i40e_pf { #define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1) #define I40E_FLAG_MSI_ENABLED BIT_ULL(2) #define I40E_FLAG_MSIX_ENABLED BIT_ULL(3) +#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT_ULL(4) #define I40E_FLAG_RSS_ENABLED BIT_ULL(6) #define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7) #define I40E_FLAG_IWARP_ENABLED BIT_ULL(10) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 7a8eb486b9ea..894c8e57ba00 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -224,7 +224,7 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = { I40E_PRIV_FLAG("LinkPolling", I40E_FLAG_LINK_POLLING_ENABLED, 0), I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0), I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0), - I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_CAPABLE, 0), + I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0), I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0), }; @@ -4092,7 +4092,7 @@ flags_complete: /* Only allow ATR evict on hardware that is capable of handling it */ if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) - pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE; + pf->flags &= ~I40E_FLAG_HW_ATR_EVICT_ENABLED; if (changed_flags & I40E_FLAG_TRUE_PROMISC_SUPPORT) { u16 sw_flags = 0, valid_flags = 0; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 150caf6ca2b4..a7a4b28b4144 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -8821,11 +8821,12 @@ static int i40e_sw_init(struct i40e_pf *pf) (pf->hw.aq.api_min_ver > 4))) { /* Supported in FW API version higher than 1.4 */ pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; - pf->flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; - } else { - pf->flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; } + /* Enable HW ATR eviction if possible */ + if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) + pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED; + pf->eeprom_version = 0xDEAD; pf->lan_veb = I40E_NO_VEB; pf->lan_vsi = I40E_NO_VSI; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c 
b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index cd894f4023b1..77115c25d96f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2341,7 +2341,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, /* Due to lack of space, no more new filters can be programmed */ if (th->syn && (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED)) return; - if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) { + if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) { /* HW ATR eviction will take care of removing filters on FIN * and RST packets. */ @@ -2403,7 +2403,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & I40E_TXD_FLTR_QW1_CNTINDEX_MASK; - if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) + if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK; fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 95c23fbaa211..0fb38ca78900 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -3017,10 +3017,12 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, VLAN_VID_MASK)); } + spin_unlock_bh(&vsi->mac_filter_hash_lock); if (vlan_id || qos) ret = i40e_vsi_add_pvid(vsi, vlanprio); else i40e_vsi_remove_pvid(vsi); + spin_lock_bh(&vsi->mac_filter_hash_lock); if (vlan_id) { dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 9b875d776b29..33c901622ed5 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c @@ -3719,7 +3719,7 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv, dma_addr_t *dma_addr, phys_addr_t *phys_addr) { - int cpu = smp_processor_id(); + int cpu = get_cpu(); *dma_addr = mvpp2_percpu_read(priv, cpu, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id)); @@ -3740,6 +3740,8 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv, if (sizeof(phys_addr_t) == 8) *phys_addr |= (u64)phys_addr_highbits << 32; } + + put_cpu(); } /* Free all buffers from the pool */ @@ -3920,18 +3922,12 @@ static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool) return bm; } -/* Get pool number from a BM cookie */ -static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie) -{ - return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF; -} - /* Release buffer to BM */ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, dma_addr_t buf_dma_addr, phys_addr_t buf_phys_addr) { - int cpu = smp_processor_id(); + int cpu = get_cpu(); if (port->priv->hw_version == MVPP22) { u32 val = 0; @@ -3958,15 +3954,15 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr); mvpp2_percpu_write(port->priv, cpu, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr); + + put_cpu(); } /* Refill BM pool */ -static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm, +static void mvpp2_pool_refill(struct mvpp2_port *port, int pool, dma_addr_t dma_addr, phys_addr_t phys_addr) { - int pool = mvpp2_bm_cookie_pool_get(bm); - mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr); } @@ -4186,8 +4182,6 @@ static void mvpp22_port_mii_set(struct mvpp2_port *port) { u32 val; - return; - /* Only GOP port 0 has an XLG MAC */ if (port->gop_id == 0) { val 
= readl(port->base + MVPP22_XLG_CTRL3_REG); @@ -4515,21 +4509,6 @@ static void mvpp2_rxq_offset_set(struct mvpp2_port *port, mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); } -/* Obtain BM cookie information from descriptor */ -static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port, - struct mvpp2_rx_desc *rx_desc) -{ - int cpu = smp_processor_id(); - int pool; - - pool = (mvpp2_rxdesc_status_get(port, rx_desc) & - MVPP2_RXD_BM_POOL_ID_MASK) >> - MVPP2_RXD_BM_POOL_ID_OFFS; - - return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) | - ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS); -} - /* Tx descriptors helper methods */ /* Get pointer to next Tx descriptor to be processed (send) by HW */ @@ -4757,7 +4736,7 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq) { - int cpu = smp_processor_id(); + int cpu = get_cpu(); if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK) rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK; @@ -4765,6 +4744,8 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_THRESH_REG, rxq->pkts_coal); + + put_cpu(); } static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz) @@ -4945,7 +4926,7 @@ static int mvpp2_rxq_init(struct mvpp2_port *port, mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); /* Set Rx descriptors queue starting address - indirect access */ - cpu = smp_processor_id(); + cpu = get_cpu(); mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); if (port->priv->hw_version == MVPP21) rxq_dma = rxq->descs_dma; @@ -4954,6 +4935,7 @@ static int mvpp2_rxq_init(struct mvpp2_port *port, mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma); mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_INDEX_REG, 0); + put_cpu(); /* Set Offset */ mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD); @@ -4980,9 +4962,13 @@ static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port, for (i = 0; i < rx_received; i++) { struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); - u32 bm = mvpp2_bm_cookie_build(port, rx_desc); + u32 status = mvpp2_rxdesc_status_get(port, rx_desc); + int pool; + + pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >> + MVPP2_RXD_BM_POOL_ID_OFFS; - mvpp2_pool_refill(port, bm, + mvpp2_pool_refill(port, pool, mvpp2_rxdesc_dma_addr_get(port, rx_desc), mvpp2_rxdesc_cookie_get(port, rx_desc)); } @@ -5012,10 +4998,11 @@ static void mvpp2_rxq_deinit(struct mvpp2_port *port, * free descriptor number */ mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); - cpu = smp_processor_id(); + cpu = get_cpu(); mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_NUM_REG, rxq->id); mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_ADDR_REG, 0); mvpp2_percpu_write(port->priv, cpu, MVPP2_RXQ_DESC_SIZE_REG, 0); + put_cpu(); } /* Create and initialize a Tx queue */ @@ -5038,7 +5025,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port, txq->last_desc = txq->size - 1; /* Set Tx descriptors queue starting address - indirect access */ - cpu = smp_processor_id(); + cpu = get_cpu(); mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma); @@ -5063,6 +5050,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port, mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, 
MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | MVPP2_PREF_BUF_THRESH(desc_per_txq / 2)); + put_cpu(); /* WRR / EJP configuration - indirect access */ tx_port_num = mvpp2_egress_port(port); @@ -5133,10 +5121,11 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port, mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); /* Set Tx descriptors queue starting address and size */ - cpu = smp_processor_id(); + cpu = get_cpu(); mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_ADDR_REG, 0); mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_DESC_SIZE_REG, 0); + put_cpu(); } /* Cleanup Tx ports */ @@ -5146,7 +5135,7 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) int delay, pending, cpu; u32 val; - cpu = smp_processor_id(); + cpu = get_cpu(); mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_NUM_REG, txq->id); val = mvpp2_percpu_read(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG); val |= MVPP2_TXQ_DRAIN_EN_MASK; @@ -5173,6 +5162,7 @@ static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq) val &= ~MVPP2_TXQ_DRAIN_EN_MASK; mvpp2_percpu_write(port->priv, cpu, MVPP2_TXQ_PREF_BUF_REG, val); + put_cpu(); for_each_present_cpu(cpu) { txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); @@ -5420,7 +5410,7 @@ static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status, /* Reuse skb if possible, or allocate a new skb and add it to BM pool */ static int mvpp2_rx_refill(struct mvpp2_port *port, - struct mvpp2_bm_pool *bm_pool, u32 bm) + struct mvpp2_bm_pool *bm_pool, int pool) { dma_addr_t dma_addr; phys_addr_t phys_addr; @@ -5432,7 +5422,7 @@ static int mvpp2_rx_refill(struct mvpp2_port *port, if (!buf) return -ENOMEM; - mvpp2_pool_refill(port, bm, dma_addr, phys_addr); + mvpp2_pool_refill(port, pool, dma_addr, phys_addr); return 0; } @@ -5490,7 +5480,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, unsigned int frag_size; dma_addr_t dma_addr; phys_addr_t phys_addr; - u32 bm, rx_status; + u32 rx_status; int pool, rx_bytes, err; void *data; @@ -5502,8 +5492,8 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo, phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc); data = (void *)phys_to_virt(phys_addr); - bm = mvpp2_bm_cookie_build(port, rx_desc); - pool = mvpp2_bm_cookie_pool_get(bm); + pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >> + MVPP2_RXD_BM_POOL_ID_OFFS; bm_pool = &port->priv->bm_pools[pool]; /* In case of an error, release the requested buffer pointer @@ -5516,7 +5506,7 @@ err_drop_frame: dev->stats.rx_errors++; mvpp2_rx_error(port, rx_desc); /* Return the buffer to the pool */ - mvpp2_pool_refill(port, bm, dma_addr, phys_addr); + mvpp2_pool_refill(port, pool, dma_addr, phys_addr); continue; } @@ -5531,7 +5521,7 @@ err_drop_frame: goto err_drop_frame; } - err = mvpp2_rx_refill(port, bm_pool, bm); + err = mvpp2_rx_refill(port, bm_pool, pool); if (err) { netdev_err(port->dev, "failed to refill BM pools\n"); goto err_drop_frame; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 2fd044b23875..944fc1742464 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -458,13 +458,15 @@ struct mlx5e_mpw_info { struct mlx5e_rx_am_stats { int ppms; /* packets per msec */ + int bpms; /* bytes per msec */ int epms; /* events per msec */ }; struct mlx5e_rx_am_sample { - ktime_t time; - unsigned int pkt_ctr; - u16 event_ctr; + ktime_t time; + u32 pkt_ctr; + u32 
byte_ctr; + u16 event_ctr; }; struct mlx5e_rx_am { /* Adaptive Moderation */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 8209affa75c3..16486dff1493 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1242,11 +1242,11 @@ static int mlx5e_get_ts_info(struct net_device *dev, SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; - info->tx_types = (BIT(1) << HWTSTAMP_TX_OFF) | - (BIT(1) << HWTSTAMP_TX_ON); + info->tx_types = BIT(HWTSTAMP_TX_OFF) | + BIT(HWTSTAMP_TX_ON); - info->rx_filters = (BIT(1) << HWTSTAMP_FILTER_NONE) | - (BIT(1) << HWTSTAMP_FILTER_ALL); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | + BIT(HWTSTAMP_FILTER_ALL); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 41cd22a223dc..277f4de30375 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4241,7 +4241,8 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev, return netdev; err_cleanup_nic: - profile->cleanup(priv); + if (profile->cleanup) + profile->cleanup(priv); free_netdev(netdev); return NULL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 79462c0368a0..46984a52a94b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -791,6 +791,8 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, params->tx_max_inline = mlx5e_get_max_inline_cap(mdev); params->num_tc = 1; params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; + + mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode); } static void mlx5e_build_rep_netdev(struct net_device *netdev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c index 02dd3a95ed8f..acf32fe952cd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c @@ -183,28 +183,27 @@ static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am) mlx5e_am_step(am); } +#define IS_SIGNIFICANT_DIFF(val, ref) \ + (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */ + static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr, struct mlx5e_rx_am_stats *prev) { - int diff; - - if (!prev->ppms) - return curr->ppms ? MLX5E_AM_STATS_BETTER : + if (!prev->bpms) + return curr->bpms ? MLX5E_AM_STATS_BETTER : MLX5E_AM_STATS_SAME; - diff = curr->ppms - prev->ppms; - if (((100 * abs(diff)) / prev->ppms) > 10) /* more than 10% diff */ - return (diff > 0) ? MLX5E_AM_STATS_BETTER : - MLX5E_AM_STATS_WORSE; + if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms)) + return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER : + MLX5E_AM_STATS_WORSE; - if (!prev->epms) - return curr->epms ? MLX5E_AM_STATS_WORSE : - MLX5E_AM_STATS_SAME; + if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) + return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER : + MLX5E_AM_STATS_WORSE; - diff = curr->epms - prev->epms; - if (((100 * abs(diff)) / prev->epms) > 10) /* more than 10% diff */ - return (diff < 0) ? MLX5E_AM_STATS_BETTER : - MLX5E_AM_STATS_WORSE; + if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) + return (curr->epms < prev->epms) ? 
MLX5E_AM_STATS_BETTER : + MLX5E_AM_STATS_WORSE; return MLX5E_AM_STATS_SAME; } @@ -266,10 +265,13 @@ static void mlx5e_am_sample(struct mlx5e_rq *rq, { s->time = ktime_get(); s->pkt_ctr = rq->stats.packets; + s->byte_ctr = rq->stats.bytes; s->event_ctr = rq->cq.event_ctr; } #define MLX5E_AM_NEVENTS 64 +#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) +#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1)) static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start, struct mlx5e_rx_am_sample *end, @@ -277,13 +279,17 @@ static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start, { /* u32 holds up to 71 minutes, should be enough */ u32 delta_us = ktime_us_delta(end->time, start->time); - unsigned int npkts = end->pkt_ctr - start->pkt_ctr; + u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr); + u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr, + start->byte_ctr); if (!delta_us) return; - curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us; - curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us; + curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us); + curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us); + curr_stats->epms = DIV_ROUND_UP(MLX5E_AM_NEVENTS * USEC_PER_MSEC, + delta_us); } void mlx5e_rx_am_work(struct work_struct *work) @@ -308,7 +314,8 @@ void mlx5e_rx_am(struct mlx5e_rq *rq) switch (am->state) { case MLX5E_AM_MEASURE_IN_PROGRESS: - nevents = rq->cq.event_ctr - am->start_sample.event_ctr; + nevents = BIT_GAP(BITS_PER_TYPE(u16), rq->cq.event_ctr, + am->start_sample.event_ctr); if (nevents < MLX5E_AM_NEVENTS) break; mlx5e_am_sample(rq, &end_sample); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 53e4992d6511..f81c3aa60b46 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -417,20 +417,13 @@ struct mlx5e_stats { }; static const struct counter_desc mlx5e_pme_status_desc[] = { - { "module_plug", 0 }, { "module_unplug", 8 }, }; static const struct counter_desc mlx5e_pme_error_desc[] = { - { "module_pwr_budget_exd", 0 }, /* power budget exceed */ - { "module_long_range", 8 }, /* long range for non MLNX cable */ - { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */ - { "module_no_eeprom", 24 }, /* no eeprom/retry time out */ - { "module_enforce_part", 32 }, /* enforce part number list */ - { "module_unknown_id", 40 }, /* unknown identifier */ - { "module_high_temp", 48 }, /* high temperature */ + { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */ + { "module_high_temp", 48 }, /* high temperature */ { "module_bad_shorted", 56 }, /* bad or shorted cable/module */ - { "module_unknown_status", 64 }, }; #endif /* __MLX5_EN_STATS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index ec63158ab643..9df9fc0d26f5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -895,7 +895,6 @@ static struct mlx5_fields fields[] = { {MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0, 2, offsetof(struct pedit_headers, eth.h_source[4])}, {MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE, 2, offsetof(struct pedit_headers, eth.h_proto)}, - {MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)}, {MLX5_ACTION_IN_FIELD_OUT_IP_TTL, 1, offsetof(struct pedit_headers, ip4.ttl)}, 
{MLX5_ACTION_IN_FIELD_OUT_SIPV4, 4, offsetof(struct pedit_headers, ip4.saddr)}, {MLX5_ACTION_IN_FIELD_OUT_DIPV4, 4, offsetof(struct pedit_headers, ip4.daddr)}, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index f991f669047e..a53e982a6863 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -906,21 +906,34 @@ static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode) return 0; } -int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) +static int mlx5_devlink_eswitch_check(struct devlink *devlink) { - struct mlx5_core_dev *dev; - u16 cur_mlx5_mode, mlx5_mode = 0; + struct mlx5_core_dev *dev = devlink_priv(devlink); - dev = devlink_priv(devlink); + if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) + return -EOPNOTSUPP; if (!MLX5_CAP_GEN(dev, vport_group_manager)) return -EOPNOTSUPP; - cur_mlx5_mode = dev->priv.eswitch->mode; - - if (cur_mlx5_mode == SRIOV_NONE) + if (dev->priv.eswitch->mode == SRIOV_NONE) return -EOPNOTSUPP; + return 0; +} + +int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + u16 cur_mlx5_mode, mlx5_mode = 0; + int err; + + err = mlx5_devlink_eswitch_check(devlink); + if (err) + return err; + + cur_mlx5_mode = dev->priv.eswitch->mode; + if (esw_mode_from_devlink(mode, &mlx5_mode)) return -EINVAL; @@ -937,15 +950,12 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode) int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) { - struct mlx5_core_dev *dev; - - dev = devlink_priv(devlink); - - if (!MLX5_CAP_GEN(dev, vport_group_manager)) - return -EOPNOTSUPP; + struct mlx5_core_dev *dev = devlink_priv(devlink); + int err; - if (dev->priv.eswitch->mode == SRIOV_NONE) - return -EOPNOTSUPP; + err = mlx5_devlink_eswitch_check(devlink); + if (err) + return err; return esw_mode_to_devlink(dev->priv.eswitch->mode, mode); } @@ -954,15 +964,12 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode) { struct mlx5_core_dev *dev = devlink_priv(devlink); struct mlx5_eswitch *esw = dev->priv.eswitch; - int num_vports = esw->enabled_vports; int err, vport; u8 mlx5_mode; - if (!MLX5_CAP_GEN(dev, vport_group_manager)) - return -EOPNOTSUPP; - - if (esw->mode == SRIOV_NONE) - return -EOPNOTSUPP; + err = mlx5_devlink_eswitch_check(devlink); + if (err) + return err; switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: @@ -985,7 +992,7 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode) if (err) goto out; - for (vport = 1; vport < num_vports; vport++) { + for (vport = 1; vport < esw->enabled_vports; vport++) { err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode); if (err) { esw_warn(dev, "Failed to set min inline on vport %d\n", @@ -1010,12 +1017,11 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode) { struct mlx5_core_dev *dev = devlink_priv(devlink); struct mlx5_eswitch *esw = dev->priv.eswitch; + int err; - if (!MLX5_CAP_GEN(dev, vport_group_manager)) - return -EOPNOTSUPP; - - if (esw->mode == SRIOV_NONE) - return -EOPNOTSUPP; + err = mlx5_devlink_eswitch_check(devlink); + if (err) + return err; return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode); } @@ -1062,11 +1068,9 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap) struct mlx5_eswitch 
*esw = dev->priv.eswitch; int err; - if (!MLX5_CAP_GEN(dev, vport_group_manager)) - return -EOPNOTSUPP; - - if (esw->mode == SRIOV_NONE) - return -EOPNOTSUPP; + err = mlx5_devlink_eswitch_check(devlink); + if (err) + return err; if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE && (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) || @@ -1105,12 +1109,11 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap) { struct mlx5_core_dev *dev = devlink_priv(devlink); struct mlx5_eswitch *esw = dev->priv.eswitch; + int err; - if (!MLX5_CAP_GEN(dev, vport_group_manager)) - return -EOPNOTSUPP; - - if (esw->mode == SRIOV_NONE) - return -EOPNOTSUPP; + err = mlx5_devlink_eswitch_check(devlink); + if (err) + return err; *encap = esw->offloads.encap; return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 0e487e8ca634..8f5125ccd8d4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -862,7 +862,7 @@ struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace ft_attr.level = level; ft_attr.prio = prio; - return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, 0); + return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_NORMAL, vport); } struct mlx5_flow_table* diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 44f59b1d6f0f..f27f84ffbc85 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -275,10 +275,8 @@ static void poll_health(unsigned long data) struct mlx5_core_health *health = &dev->priv.health; u32 count; - if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { - mod_timer(&health->timer, get_next_poll_jiffies()); - return; - } + if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) + goto out; count = ioread32be(health->health_counter); if (count == health->prev) @@ -290,8 +288,6 @@ static void poll_health(unsigned long data) if (health->miss_counter == MAX_MISSES) { dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n"); print_health_info(dev); - } else { - mod_timer(&health->timer, get_next_poll_jiffies()); } if (in_fatal(dev) && !health->sick) { @@ -305,6 +301,9 @@ static void poll_health(unsigned long data) "new health works are not permitted at this stage\n"); spin_unlock(&health->wq_lock); } + +out: + mod_timer(&health->timer, get_next_poll_jiffies()); } void mlx5_start_health_poll(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index af945edfee19..13be264587f1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -175,8 +175,9 @@ static struct mlx5_profile profile[] = { }, }; -#define FW_INIT_TIMEOUT_MILI 2000 -#define FW_INIT_WAIT_MS 2 +#define FW_INIT_TIMEOUT_MILI 2000 +#define FW_INIT_WAIT_MS 2 +#define FW_PRE_INIT_TIMEOUT_MILI 10000 static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili) { @@ -537,8 +538,10 @@ static int handle_hca_cap(struct mlx5_core_dev *dev) /* disable cmdif checksum */ MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); - /* If the HCA supports 4K UARs use it */ - if (MLX5_CAP_GEN_MAX(dev, uar_4k)) + /* Enable 4K UAR only when HCA supports it and page size is bigger + * than 4K. 
+ */ + if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096) MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1); MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12); @@ -1011,6 +1014,15 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, */ dev->state = MLX5_DEVICE_STATE_UP; + /* wait for firmware to accept initialization segments configurations + */ + err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI); + if (err) { + dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n", + FW_PRE_INIT_TIMEOUT_MILI); + goto out; + } + err = mlx5_cmd_init(dev); if (err) { dev_err(&pdev->dev, "Failed initializing command interface, aborting\n"); diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c index 483241b4b05d..a672f6a860dc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_debug.c +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -2956,7 +2956,7 @@ static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn, qed_wr(p_hwfn, p_ptt, s_storm_defs[storm_id].cm_ctx_wr_addr, - BIT(9) | lid); + (i << 9) | lid); *(dump_buf + offset) = qed_rd(p_hwfn, p_ptt, rd_reg_addr); diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index b7e4345c990d..019cef1d3cf7 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -661,8 +661,6 @@ restore_filters: up_write(&vf->efx->filter_sem); mutex_unlock(&vf->efx->mac_lock); - up_write(&vf->efx->filter_sem); - rc2 = efx_net_open(vf->efx->net_dev); if (rc2) goto reset_nic; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index aa6476439aee..e0ef02f9503b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c @@ -214,13 +214,13 @@ static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p) { /* Context type from W/B descriptor must be zero */ if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE) - return -EINVAL; + return 0; /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */ if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS) - return 0; + return 1; - return 1; + return 0; } static inline u64 dwmac4_get_timestamp(void *desc, u32 ats) @@ -282,7 +282,10 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats) } } exit: - return ret; + if (likely(ret == 0)) + return 1; + + return 0; } static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 12236daf7bb6..6e4cbc6ce0ef 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -434,14 +434,14 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, return; /* check tx tstamp status */ - if (!priv->hw->desc->get_tx_timestamp_status(p)) { + if (priv->hw->desc->get_tx_timestamp_status(p)) { /* get the valid tstamp */ ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); shhwtstamp.hwtstamp = ns_to_ktime(ns); - netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns); + netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns); /* pass tstamp to stack */ skb_tstamp_tx(skb, &shhwtstamp); } @@ -468,19 +468,19 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, return; /* Check 
if timestamp is available */ - if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { + if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { /* For GMAC4, the valid timestamp is from CTX next desc. */ if (priv->plat->has_gmac4) ns = priv->hw->desc->get_timestamp(np, priv->adv_ts); else ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); - netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns); + netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); shhwtstamp = skb_hwtstamps(skb); memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); shhwtstamp->hwtstamp = ns_to_ktime(ns); } else { - netdev_err(priv->dev, "cannot get RX hw timestamp\n"); + netdev_dbg(priv->dev, "cannot get RX hw timestamp\n"); } } @@ -546,7 +546,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) /* PTP v1, UDP, any kind of event packet */ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; /* take time stamp for all event messages */ - snap_type_sel = PTP_TCR_SNAPTYPSEL_1; + if (priv->plat->has_gmac4) + snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; + else + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; @@ -578,7 +581,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; ptp_v2 = PTP_TCR_TSVER2ENA; /* take time stamp for all event messages */ - snap_type_sel = PTP_TCR_SNAPTYPSEL_1; + if (priv->plat->has_gmac4) + snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; + else + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; @@ -612,7 +618,10 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; ptp_v2 = PTP_TCR_TSVER2ENA; /* take time stamp for all event messages */ - snap_type_sel = PTP_TCR_SNAPTYPSEL_1; + if (priv->plat->has_gmac4) + snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1; + else + snap_type_sel = PTP_TCR_SNAPTYPSEL_1; ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; @@ -2822,7 +2831,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) tx_q->tx_skbuff_dma[first_entry].buf = des; tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); - tx_q->tx_skbuff[first_entry] = skb; first->des0 = cpu_to_le32(des); @@ -2856,6 +2864,14 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; + /* Only the last descriptor gets to point to the skb. */ + tx_q->tx_skbuff[tx_q->cur_tx] = skb; + + /* We've used all descriptors we need for this skb, however, + * advance cur_tx so that it references a fresh descriptor. + * ndo_start_xmit will fill this descriptor the next time it's + * called and stmmac_tx_clean may clean up to this descriptor. + */ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE); if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { @@ -2989,8 +3005,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) first = desc; - tx_q->tx_skbuff[first_entry] = skb; - enh_desc = priv->plat->enh_desc; /* To program the descriptors according to the size of the frame */ if (enh_desc) @@ -3038,8 +3052,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) skb->len); } - entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); + /* Only the last descriptor gets to point to the skb. 
*/ tx_q->tx_skbuff[entry] = skb; + /* We've used all descriptors we need for this skb, however, + * advance cur_tx so that it references a fresh descriptor. + * ndo_start_xmit will fill this descriptor the next time it's + * called and stmmac_tx_clean may clean up to this descriptor. + */ + entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); tx_q->cur_tx = entry; if (netif_msg_pktdata(priv)) { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index 48fb72fc423c..f4b31d69f60e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h @@ -59,7 +59,8 @@ /* Enable Snapshot for Messages Relevant to Master */ #define PTP_TCR_TSMSTRENA BIT(15) /* Select PTP packets for Taking Snapshots */ -#define PTP_TCR_SNAPTYPSEL_1 GENMASK(17, 16) +#define PTP_TCR_SNAPTYPSEL_1 BIT(16) +#define PTP_GMAC4_TCR_SNAPTYPSEL_1 GENMASK(17, 16) /* Enable MAC address for PTP Frame Filtering */ #define PTP_TCR_TSENMACADDR BIT(18)
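
The BIT_GAP()/BITS_PER_TYPE() macros introduced in the mlx5 en_rx_am.c hunk above take the difference of two free-running counters modulo the counter width, so a delta computed across a u16/u32 wraparound still comes out right. A minimal standalone sketch of the same arithmetic (plain userspace C; main() and the sample counter values are illustrative, not driver code):

	#include <assert.h>
	#include <stdint.h>

	#define BITS_PER_BYTE 8
	#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
	#define BIT_ULL(nr) (1ULL << (nr))
	/* distance from 'start' to 'end' modulo 2^bits, as in the patch */
	#define BIT_GAP(bits, end, start) \
		((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))

	int main(void)
	{
		uint16_t old_ctr = 0xFFF0, new_ctr = 0x0010;

		/* plain subtraction after integer promotion yields -65504
		 * here; the masked form recovers the 0x20 events that
		 * actually elapsed across the u16 wraparound.
		 */
		assert(BIT_GAP(BITS_PER_TYPE(uint16_t), new_ctr, old_ctr) == 0x20);
		return 0;
	}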
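The ena_com.c polling change above drops the hand-rolled elapsed-microseconds arithmetic in favor of a precomputed jiffies deadline checked with time_is_before_jiffies(), which stays correct when the tick counter wraps. A short sketch of that idiom, assuming a 32-bit tick counter (deadline_passed() and jiffies_t are hypothetical stand-ins for the kernel's time_after() machinery, not its API):

	#include <stdbool.h>
	#include <stdint.h>

	typedef uint32_t jiffies_t;

	/* mirrors time_after(now, deadline): signed view of the modular
	 * difference, valid while the span stays below 2^31 ticks */
	static bool deadline_passed(jiffies_t now, jiffies_t deadline)
	{
		return (int32_t)(deadline - now) < 0;
	}

	int main(void)
	{
		jiffies_t now = 0xFFFFFFF0u;       /* just before wraparound */
		jiffies_t deadline = now + 0x20u;  /* wraps to 0x00000010 */

		/* an absolute test (now > deadline) would wrongly report
		 * expiry here; the modular test does not */
		return deadline_passed(now, deadline) ? 1 : 0;  /* 0 */
	}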