Diffstat (limited to 'drivers/net')
 drivers/net/can/c_can/c_can.h                              |  17
 drivers/net/can/c_can/c_can_main.c                         |  11
 drivers/net/dsa/mt7530.c                                   |  19
 drivers/net/ethernet/cadence/macb_main.c                   |   4
 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c             |  28
 drivers/net/ethernet/intel/ice/ice_txrx.c                  |   2
 drivers/net/ethernet/intel/ice/ice_xsk.c                   | 163
 drivers/net/ethernet/intel/ice/ice_xsk.h                   |   7
 drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c         |   4
 drivers/net/ethernet/mediatek/mtk_eth_soc.h                |   4
 drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c |   4
 drivers/net/ethernet/mscc/ocelot.c                         |   7
 drivers/net/ethernet/sfc/ef10.c                            |   2
 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c          |  23
 drivers/net/hippi/rrunner.c                                |   1
 drivers/net/phy/phy_device.c                               |  10
 drivers/net/tun.c                                          |   9
 drivers/net/usb/qmi_wwan.c                                 |   1
 drivers/net/usb/usbnet.c                                   |   7
 19 files changed, 174 insertions(+), 149 deletions(-)
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index f23a03300a81..029cd8194ed5 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -235,9 +235,22 @@ static inline u8 c_can_get_tx_tail(const struct c_can_tx_ring *ring)
 	return ring->tail & (ring->obj_num - 1);
 }
 
-static inline u8 c_can_get_tx_free(const struct c_can_tx_ring *ring)
+static inline u8 c_can_get_tx_free(const struct c_can_priv *priv,
+				   const struct c_can_tx_ring *ring)
 {
-	return ring->obj_num - (ring->head - ring->tail);
+	u8 head = c_can_get_tx_head(ring);
+	u8 tail = c_can_get_tx_tail(ring);
+
+	if (priv->type == BOSCH_D_CAN)
+		return ring->obj_num - (ring->head - ring->tail);
+
+	/* This is not a FIFO. C/D_CAN sends out the buffers
+	 * prioritized. The lowest buffer number wins.
+	 */
+	if (head < tail)
+		return 0;
+
+	return ring->obj_num - head;
 }
 
 #endif /* C_CAN_H */
diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c
index dc8132862f33..d6605dbb7737 100644
--- a/drivers/net/can/c_can/c_can_main.c
+++ b/drivers/net/can/c_can/c_can_main.c
@@ -429,7 +429,7 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
 static bool c_can_tx_busy(const struct c_can_priv *priv,
 			  const struct c_can_tx_ring *tx_ring)
 {
-	if (c_can_get_tx_free(tx_ring) > 0)
+	if (c_can_get_tx_free(priv, tx_ring) > 0)
 		return false;
 
 	netif_stop_queue(priv->dev);
@@ -437,7 +437,7 @@ static bool c_can_tx_busy(const struct c_can_priv *priv,
 	/* Memory barrier before checking tx_free (head and tail) */
 	smp_mb();
 
-	if (c_can_get_tx_free(tx_ring) == 0) {
+	if (c_can_get_tx_free(priv, tx_ring) == 0) {
 		netdev_dbg(priv->dev,
 			   "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
 			   tx_ring->head, tx_ring->tail,
@@ -465,7 +465,7 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
 
 	idx = c_can_get_tx_head(tx_ring);
 	tx_ring->head++;
-	if (c_can_get_tx_free(tx_ring) == 0)
+	if (c_can_get_tx_free(priv, tx_ring) == 0)
 		netif_stop_queue(dev);
 
 	if (idx < c_can_get_tx_tail(tx_ring))
@@ -748,7 +748,7 @@ static void c_can_do_tx(struct net_device *dev)
 		return;
 
 	tx_ring->tail += pkts;
-	if (c_can_get_tx_free(tx_ring)) {
+	if (c_can_get_tx_free(priv, tx_ring)) {
 		/* Make sure that anybody stopping the queue after
 		 * this sees the new tx_ring->tail.
 		 */
@@ -760,8 +760,7 @@ static void c_can_do_tx(struct net_device *dev)
 	stats->tx_packets += pkts;
 
 	tail = c_can_get_tx_tail(tx_ring);
-
-	if (tail == 0) {
+	if (priv->type == BOSCH_D_CAN && tail == 0) {
 		u8 head = c_can_get_tx_head(tx_ring);
 
 		/* Start transmission for all cached messages */
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 835807911be0..409d5c3d76ea 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -506,14 +506,19 @@ static bool mt7531_dual_sgmii_supported(struct mt7530_priv *priv)
 static int
 mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
 {
-	struct mt7530_priv *priv = ds->priv;
+	return 0;
+}
+
+static void
+mt7531_pll_setup(struct mt7530_priv *priv)
+{
 	u32 top_sig;
 	u32 hwstrap;
 	u32 xtal;
 	u32 val;
 
 	if (mt7531_dual_sgmii_supported(priv))
-		return 0;
+		return;
 
 	val = mt7530_read(priv, MT7531_CREV);
 	top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
@@ -592,8 +597,6 @@ mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
 	val |= EN_COREPLL;
 	mt7530_write(priv, MT7531_PLLGP_EN, val);
 	usleep_range(25, 35);
-
-	return 0;
 }
 
 static void
@@ -2326,11 +2329,17 @@ mt7531_setup(struct dsa_switch *ds)
 		return -ENODEV;
 	}
 
+	/* all MACs must be forced link-down before sw reset */
+	for (i = 0; i < MT7530_NUM_PORTS; i++)
+		mt7530_write(priv, MT7530_PMCR_P(i), MT7531_FORCE_LNK);
+
 	/* Reset the switch through internal reset */
 	mt7530_write(priv, MT7530_SYS_CTRL,
 		     SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
 		     SYS_CTRL_REG_RST);
 
+	mt7531_pll_setup(priv);
+
 	if (mt7531_dual_sgmii_supported(priv)) {
 		priv->p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII;
 
@@ -2887,8 +2896,6 @@ mt7531_cpu_port_config(struct dsa_switch *ds, int port)
 	case 6:
 		interface = PHY_INTERFACE_MODE_2500BASEX;
 
-		mt7531_pad_setup(ds, interface);
-
 		priv->p6_interface = interface;
 		break;
 	default:
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 66c7d08d376a..a2897549f9c4 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -5109,6 +5109,7 @@ static int __maybe_unused macb_suspend(struct device *dev)
 	if (!(bp->wol & MACB_WOL_ENABLED)) {
 		rtnl_lock();
 		phylink_stop(bp->phylink);
+		phy_exit(bp->sgmii_phy);
 		rtnl_unlock();
 		spin_lock_irqsave(&bp->lock, flags);
 		macb_reset_hw(bp);
@@ -5198,6 +5199,9 @@ static int __maybe_unused macb_resume(struct device *dev)
 	macb_set_rx_mode(netdev);
 	macb_restore_features(bp);
 	rtnl_lock();
+	if (!device_may_wakeup(&bp->dev->dev))
+		phy_init(bp->sgmii_phy);
+
 	phylink_start(bp->phylink);
 	rtnl_unlock();
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index a7f291c89702..557c591a6ce3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -14,6 +14,7 @@
 #include "cudbg_entity.h"
 #include "cudbg_lib.h"
 #include "cudbg_zlib.h"
+#include "cxgb4_tc_mqprio.h"
 
 static const u32 t6_tp_pio_array[][IREG_NUM_ELEM] = {
 	{0x7e40, 0x7e44, 0x020, 28}, /* t6_tp_pio_regs_20_to_3b */
@@ -3458,7 +3459,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
 			for (i = 0; i < utxq->ntxq; i++)
 				QDESC_GET_TXQ(&utxq->uldtxq[i].q,
 					      cudbg_uld_txq_to_qtype(j),
-					      out_unlock);
+					      out_unlock_uld);
 		}
 	}
 
@@ -3475,7 +3476,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
 			for (i = 0; i < urxq->nrxq; i++)
 				QDESC_GET_RXQ(&urxq->uldrxq[i].rspq,
 					      cudbg_uld_rxq_to_qtype(j),
-					      out_unlock);
+					      out_unlock_uld);
 		}
 
 		/* ULD FLQ */
@@ -3487,7 +3488,7 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
 			for (i = 0; i < urxq->nrxq; i++)
 				QDESC_GET_FLQ(&urxq->uldrxq[i].fl,
 					      cudbg_uld_flq_to_qtype(j),
-					      out_unlock);
+					      out_unlock_uld);
 		}
 
 		/* ULD CIQ */
@@ -3500,29 +3501,34 @@ int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
 			for (i = 0; i < urxq->nciq; i++)
 				QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq,
 					      cudbg_uld_ciq_to_qtype(j),
-					      out_unlock);
+					      out_unlock_uld);
 		}
 	}
 
+	mutex_unlock(&uld_mutex);
+
+	if (!padap->tc_mqprio)
+		goto out;
+
+	mutex_lock(&padap->tc_mqprio->mqprio_mutex);
 	/* ETHOFLD TXQ */
 	if (s->eohw_txq)
 		for (i = 0; i < s->eoqsets; i++)
 			QDESC_GET_TXQ(&s->eohw_txq[i].q,
-				      CUDBG_QTYPE_ETHOFLD_TXQ, out);
+				      CUDBG_QTYPE_ETHOFLD_TXQ, out_unlock_mqprio);
 
 	/* ETHOFLD RXQ and FLQ */
 	if (s->eohw_rxq) {
 		for (i = 0; i < s->eoqsets; i++)
 			QDESC_GET_RXQ(&s->eohw_rxq[i].rspq,
-				      CUDBG_QTYPE_ETHOFLD_RXQ, out);
+				      CUDBG_QTYPE_ETHOFLD_RXQ, out_unlock_mqprio);
 
 		for (i = 0; i < s->eoqsets; i++)
 			QDESC_GET_FLQ(&s->eohw_rxq[i].fl,
-				      CUDBG_QTYPE_ETHOFLD_FLQ, out);
+				      CUDBG_QTYPE_ETHOFLD_FLQ, out_unlock_mqprio);
 	}
 
-out_unlock:
-	mutex_unlock(&uld_mutex);
+out_unlock_mqprio:
+	mutex_unlock(&padap->tc_mqprio->mqprio_mutex);
 
 out:
 	qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry);
@@ -3559,6 +3565,10 @@ out_free:
 #undef QDESC_GET
 
 	return rc;
+
+out_unlock_uld:
+	mutex_unlock(&uld_mutex);
+	goto out;
 }
 
 int cudbg_collect_flash(struct cudbg_init *pdbg_init,
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 97453d1dfafe..dd2285d4bef4 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -1467,7 +1467,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
 		bool wd;
 
 		if (tx_ring->xsk_pool)
-			wd = ice_xmit_zc(tx_ring, ICE_DESC_UNUSED(tx_ring), budget);
+			wd = ice_xmit_zc(tx_ring);
 		else if (ice_ring_is_xdp(tx_ring))
 			wd = true;
 		else
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 03ce85f6e6df..056c904b83cc 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -392,13 +392,6 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
 		goto failure;
 	}
 
-	if (!is_power_of_2(vsi->rx_rings[qid]->count) ||
-	    !is_power_of_2(vsi->tx_rings[qid]->count)) {
-		netdev_err(vsi->netdev, "Please align ring sizes to power of 2\n");
-		pool_failure = -EINVAL;
-		goto failure;
-	}
-
 	if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
 
 	if (if_running) {
@@ -534,11 +527,10 @@ exit:
 bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
 {
 	u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
-	u16 batched, leftover, i, tail_bumps;
+	u16 leftover, i, tail_bumps;
 
-	batched = ALIGN_DOWN(count, rx_thresh);
-	tail_bumps = batched / rx_thresh;
-	leftover = count & (rx_thresh - 1);
+	tail_bumps = count / rx_thresh;
+	leftover = count - (tail_bumps * rx_thresh);
 
 	for (i = 0; i < tail_bumps; i++)
 		if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh))
@@ -788,69 +780,57 @@ ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
 }
 
 /**
- * ice_clean_xdp_irq_zc - Reclaim resources after transmit completes on XDP ring
- * @xdp_ring: XDP ring to clean
- * @napi_budget: amount of descriptors that NAPI allows us to clean
- *
- * Returns count of cleaned descriptors
+ * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
+ * @xdp_ring: XDP Tx ring
  */
-static u16 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring, int napi_budget)
+static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
 {
-	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
-	int budget = napi_budget / tx_thresh;
-	u16 next_dd = xdp_ring->next_dd;
-	u16 ntc, cleared_dds = 0;
-
-	do {
-		struct ice_tx_desc *next_dd_desc;
-		u16 desc_cnt = xdp_ring->count;
-		struct ice_tx_buf *tx_buf;
-		u32 xsk_frames;
-		u16 i;
-
-		next_dd_desc = ICE_TX_DESC(xdp_ring, next_dd);
-		if (!(next_dd_desc->cmd_type_offset_bsz &
-		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
-			break;
+	u16 ntc = xdp_ring->next_to_clean;
+	struct ice_tx_desc *tx_desc;
+	u16 cnt = xdp_ring->count;
+	struct ice_tx_buf *tx_buf;
+	u16 xsk_frames = 0;
+	u16 last_rs;
+	int i;
 
-		cleared_dds++;
-		xsk_frames = 0;
-		if (likely(!xdp_ring->xdp_tx_active)) {
-			xsk_frames = tx_thresh;
-			goto skip;
-		}
+	last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1;
+	tx_desc = ICE_TX_DESC(xdp_ring, last_rs);
+	if ((tx_desc->cmd_type_offset_bsz &
+	    cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
+		if (last_rs >= ntc)
+			xsk_frames = last_rs - ntc + 1;
+		else
+			xsk_frames = last_rs + cnt - ntc + 1;
+	}
 
-		ntc = xdp_ring->next_to_clean;
+	if (!xsk_frames)
+		return;
 
-		for (i = 0; i < tx_thresh; i++) {
-			tx_buf = &xdp_ring->tx_buf[ntc];
+	if (likely(!xdp_ring->xdp_tx_active))
+		goto skip;
 
-			if (tx_buf->raw_buf) {
-				ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
-				tx_buf->raw_buf = NULL;
-			} else {
-				xsk_frames++;
-			}
+	ntc = xdp_ring->next_to_clean;
+	for (i = 0; i < xsk_frames; i++) {
+		tx_buf = &xdp_ring->tx_buf[ntc];
 
-			ntc++;
-			if (ntc >= xdp_ring->count)
-				ntc = 0;
+		if (tx_buf->raw_buf) {
+			ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
+			tx_buf->raw_buf = NULL;
+		} else {
+			xsk_frames++;
 		}
+
+		ntc++;
+		if (ntc >= xdp_ring->count)
+			ntc = 0;
+	}
 skip:
-		xdp_ring->next_to_clean += tx_thresh;
-		if (xdp_ring->next_to_clean >= desc_cnt)
-			xdp_ring->next_to_clean -= desc_cnt;
-		if (xsk_frames)
-			xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
-		next_dd_desc->cmd_type_offset_bsz = 0;
-		next_dd = next_dd + tx_thresh;
-		if (next_dd >= desc_cnt)
-			next_dd = tx_thresh - 1;
-	} while (--budget);
-
-	xdp_ring->next_dd = next_dd;
-
-	return cleared_dds * tx_thresh;
+	tx_desc->cmd_type_offset_bsz = 0;
+	xdp_ring->next_to_clean += xsk_frames;
+	if (xdp_ring->next_to_clean >= cnt)
+		xdp_ring->next_to_clean -= cnt;
+	if (xsk_frames)
+		xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
 }
 
 /**
@@ -885,7 +865,6 @@ static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
 static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
 			       unsigned int *total_bytes)
 {
-	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
 	u16 ntu = xdp_ring->next_to_use;
 	struct ice_tx_desc *tx_desc;
 	u32 i;
@@ -905,13 +884,6 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de
 	}
 
 	xdp_ring->next_to_use = ntu;
-
-	if (xdp_ring->next_to_use > xdp_ring->next_rs) {
-		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
-		tx_desc->cmd_type_offset_bsz |=
-			cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
-		xdp_ring->next_rs += tx_thresh;
-	}
 }
 
 /**
@@ -924,7 +896,6 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de
 static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
 				u32 nb_pkts, unsigned int *total_bytes)
 {
-	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
 	u32 batched, leftover, i;
 
 	batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
@@ -933,54 +904,54 @@ static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *d
 		ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
 	for (; i < batched + leftover; i++)
 		ice_xmit_pkt(xdp_ring, &descs[i], total_bytes);
+}
 
-	if (xdp_ring->next_to_use > xdp_ring->next_rs) {
-		struct ice_tx_desc *tx_desc;
+/**
+ * ice_set_rs_bit - set RS bit on last produced descriptor (one behind current NTU)
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ */
+static void ice_set_rs_bit(struct ice_tx_ring *xdp_ring)
+{
+	u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
+	struct ice_tx_desc *tx_desc;
 
-		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
-		tx_desc->cmd_type_offset_bsz |=
-			cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
-		xdp_ring->next_rs += tx_thresh;
-	}
+	tx_desc = ICE_TX_DESC(xdp_ring, ntu);
+	tx_desc->cmd_type_offset_bsz |=
+		cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
 }
 
 /**
  * ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
  * @xdp_ring: XDP ring to produce the HW Tx descriptors on
- * @budget: number of free descriptors on HW Tx ring that can be used
- * @napi_budget: amount of descriptors that NAPI allows us to clean
  *
  * Returns true if there is no more work that needs to be done, false otherwise
  */
-bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, u32 budget, int napi_budget)
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
 {
 	struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
-	u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
 	u32 nb_pkts, nb_processed = 0;
 	unsigned int total_bytes = 0;
+	int budget;
+
+	ice_clean_xdp_irq_zc(xdp_ring);
 
-	if (budget < tx_thresh)
-		budget += ice_clean_xdp_irq_zc(xdp_ring, napi_budget);
+	budget = ICE_DESC_UNUSED(xdp_ring);
+	budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));
 
 	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
 	if (!nb_pkts)
 		return true;
 
 	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
-		struct ice_tx_desc *tx_desc;
-
 		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
 		ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
-		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
-		tx_desc->cmd_type_offset_bsz |=
-			cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
-		xdp_ring->next_rs = tx_thresh - 1;
 		xdp_ring->next_to_use = 0;
 	}
 
 	ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
 			    &total_bytes);
 
+	ice_set_rs_bit(xdp_ring);
 	ice_xdp_ring_update_tail(xdp_ring);
 	ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);
 
@@ -1058,14 +1029,16 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
  */
 void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
 {
-	u16 count_mask = rx_ring->count - 1;
 	u16 ntc = rx_ring->next_to_clean;
 	u16 ntu = rx_ring->next_to_use;
 
-	for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+	while (ntc != ntu) {
 		struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);
 
 		xsk_buff_free(xdp);
+		ntc++;
+		if (ntc >= rx_ring->count)
+			ntc = 0;
 	}
 }
 
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 4edbe81eb646..6fa181f080ef 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -26,13 +26,10 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count);
 bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
 void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
 void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
-bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, u32 budget, int napi_budget);
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring);
 int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
 #else
-static inline bool
-ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
-	    u32 __always_unused budget,
-	    int __always_unused napi_budget)
+static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring)
 {
 	return false;
 }
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
index 0eec05d905eb..4a3baa7e0142 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -700,10 +700,10 @@ void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
 
 void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
 {
-	static struct dentry *mvpp2_root;
-	struct dentry *mvpp2_dir;
+	struct dentry *mvpp2_dir, *mvpp2_root;
 	int ret, i;
 
+	mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL);
 	if (!mvpp2_root)
 		mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);
 
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index ecf85e9ed824..0f9668a4079d 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -319,8 +319,8 @@
 #define MTK_RXD5_PPE_CPU_REASON	GENMASK(22, 18)
 #define MTK_RXD5_SRC_PORT	GENMASK(29, 26)
 
-#define RX_DMA_GET_SPORT(x)	(((x) >> 19) & 0xf)
-#define RX_DMA_GET_SPORT_V2(x)	(((x) >> 26) & 0x7)
+#define RX_DMA_GET_SPORT(x)	(((x) >> 19) & 0x7)
+#define RX_DMA_GET_SPORT_V2(x)	(((x) >> 26) & 0xf)
 
 /* PDMA V2 descriptor rxd3 */
 #define RX_DMA_VTAG_V2		BIT(0)
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
index 4aeb927c3715..aa780b1614a3 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
@@ -246,8 +246,8 @@ int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv)
 	}
 
 	priv->clk_io = devm_ioremap(dev, res->start, resource_size(res));
-	if (IS_ERR(priv->clk_io))
-		return PTR_ERR(priv->clk_io);
+	if (!priv->clk_io)
+		return -ENOMEM;
 
 	mlxbf_gige_mdio_cfg(priv);
 
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 306026e6aa11..8f8005bc6eea 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -290,6 +290,13 @@ static int ocelot_port_num_untagged_vlans(struct ocelot *ocelot, int port)
 		if (!(vlan->portmask & BIT(port)))
 			continue;
 
+		/* Ignore the VLAN added by ocelot_add_vlan_unaware_pvid(),
+		 * because this is never active in hardware at the same time as
+		 * the bridge VLANs, which only matter in VLAN-aware mode.
+		 */
+		if (vlan->vid >= OCELOT_RSV_VLAN_RANGE_START)
+			continue;
+
 		if (vlan->untagged & BIT(port))
 			num_untagged++;
 	}
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index ee734b69150f..d1e1aa19a68e 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -4213,7 +4213,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
 	.ev_test_generate = efx_ef10_ev_test_generate,
 	.filter_table_probe = efx_ef10_filter_table_probe,
 	.filter_table_restore = efx_mcdi_filter_table_restore,
-	.filter_table_remove = efx_mcdi_filter_table_remove,
+	.filter_table_remove = efx_ef10_filter_table_remove,
 	.filter_update_rx_scatter = efx_mcdi_update_rx_scatter,
 	.filter_insert = efx_mcdi_filter_insert,
 	.filter_remove_safe = efx_mcdi_filter_remove_safe,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 592d29abcb1c..9083159b93f1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3801,6 +3801,15 @@ static int __stmmac_open(struct net_device *dev,
 
 	stmmac_reset_queues_param(priv);
 
+	if (priv->plat->serdes_powerup) {
+		ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
+		if (ret < 0) {
+			netdev_err(priv->dev, "%s: Serdes powerup failed\n",
+				   __func__);
+			goto init_error;
+		}
+	}
+
 	ret = stmmac_hw_setup(dev, true);
 	if (ret < 0) {
 		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
@@ -3904,6 +3913,10 @@ static int stmmac_release(struct net_device *dev)
 	/* Disable the MAC Rx/Tx */
 	stmmac_mac_set(priv, priv->ioaddr, false);
 
+	/* Powerdown Serdes if there is */
+	if (priv->plat->serdes_powerdown)
+		priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
+
 	netif_carrier_off(dev);
 
 	stmmac_release_ptp(priv);
@@ -7293,14 +7306,6 @@ int stmmac_dvr_probe(struct device *device,
 		goto error_netdev_register;
 	}
 
-	if (priv->plat->serdes_powerup) {
-		ret = priv->plat->serdes_powerup(ndev,
-						 priv->plat->bsp_priv);
-
-		if (ret < 0)
-			goto error_serdes_powerup;
-	}
-
 #ifdef CONFIG_DEBUG_FS
 	stmmac_init_fs(ndev);
 #endif
@@ -7315,8 +7320,6 @@ int stmmac_dvr_probe(struct device *device,
 
 	return ret;
 
-error_serdes_powerup:
-	unregister_netdev(ndev);
 error_netdev_register:
 	phylink_destroy(priv->phylink);
 error_xpcs_setup:
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 74e845fa2e07..aa8f828a0ae7 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -213,6 +213,7 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		pci_iounmap(pdev, rrpriv->regs);
 	if (pdev)
 		pci_release_regions(pdev);
+	pci_disable_device(pdev);
  out2:
 	free_netdev(dev);
  out3:
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 12ff276b80ae..4df8c337221b 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -316,11 +316,13 @@ static __maybe_unused int mdio_bus_phy_resume(struct device *dev)
 
 	phydev->suspended_by_mdio_bus = 0;
 
-	/* If we manged to get here with the PHY state machine in a state neither
-	 * PHY_HALTED nor PHY_READY this is an indication that something went wrong
-	 * and we should most likely be using MAC managed PM and we are not.
+	/* If we managed to get here with the PHY state machine in a state
+	 * neither PHY_HALTED, PHY_READY nor PHY_UP, this is an indication
+	 * that something went wrong and we should most likely be using
+	 * MAC managed PM, but we are not.
 	 */
-	WARN_ON(phydev->state != PHY_HALTED && phydev->state != PHY_READY);
+	WARN_ON(phydev->state != PHY_HALTED && phydev->state != PHY_READY &&
+		phydev->state != PHY_UP);
 
 	ret = phy_init_hw(phydev);
 	if (ret < 0)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 259b2b84b2b3..db736b944016 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2828,7 +2828,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 		rcu_assign_pointer(tfile->tun, tun);
 	}
 
-	netif_carrier_on(tun->dev);
+	if (ifr->ifr_flags & IFF_NO_CARRIER)
+		netif_carrier_off(tun->dev);
+	else
+		netif_carrier_on(tun->dev);
 
 	/* Make sure persistent devices do not get stuck in
 	 * xoff state.
@@ -3056,8 +3059,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
 		 * This is needed because we never checked for invalid flags on
 		 * TUNSETIFF.
 		 */
-		return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
-				(unsigned int __user*)argp);
+		return put_user(IFF_TUN | IFF_TAP | IFF_NO_CARRIER |
+				TUN_FEATURES, (unsigned int __user*)argp);
 	} else if (cmd == TUNSETQUEUE) {
 		return tun_set_queue(file, &ifr);
 	} else if (cmd == SIOCGSKNS) {
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 0cb187def5bc..26c34a7c21bd 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1402,6 +1402,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x413c, 0x81b3, 8)},	/* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
 	{QMI_FIXED_INTF(0x413c, 0x81b6, 8)},	/* Dell Wireless 5811e */
 	{QMI_FIXED_INTF(0x413c, 0x81b6, 10)},	/* Dell Wireless 5811e */
+	{QMI_FIXED_INTF(0x413c, 0x81c2, 8)},	/* Dell Wireless 5811e */
 	{QMI_FIXED_INTF(0x413c, 0x81cc, 8)},	/* Dell Wireless 5816e */
 	{QMI_FIXED_INTF(0x413c, 0x81d7, 0)},	/* Dell Wireless 5821e */
 	{QMI_FIXED_INTF(0x413c, 0x81d7, 1)},	/* Dell Wireless 5821e preproduction config */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index aaa89b4cfd50..e368b0780753 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1598,6 +1598,7 @@ void usbnet_disconnect (struct usb_interface *intf)
 	struct usbnet		*dev;
 	struct usb_device	*xdev;
 	struct net_device	*net;
+	struct urb		*urb;
 
 	dev = usb_get_intfdata(intf);
 	usb_set_intfdata(intf, NULL);
@@ -1614,7 +1615,11 @@ void usbnet_disconnect (struct usb_interface *intf)
 	net = dev->net;
 	unregister_netdev (net);
 
-	usb_scuttle_anchored_urbs(&dev->deferred);
+	while ((urb = usb_get_from_anchor(&dev->deferred))) {
+		dev_kfree_skb(urb->context);
+		kfree(urb->sg);
+		usb_free_urb(urb);
+	}
 
 	if (dev->driver_info->unbind)
 		dev->driver_info->unbind(dev, intf);