Diffstat (limited to 'drivers/net/ethernet/microsoft')
-rw-r--r--   drivers/net/ethernet/microsoft/mana/gdma_main.c    |   2
-rw-r--r--   drivers/net/ethernet/microsoft/mana/mana_bpf.c     |  22
-rw-r--r--   drivers/net/ethernet/microsoft/mana/mana_en.c      | 457
-rw-r--r--   drivers/net/ethernet/microsoft/mana/mana_ethtool.c |  52
4 files changed, 442 insertions, 91 deletions
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index f9b8f372ec8a..8f3f78b68592 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1439,7 +1439,6 @@ free_gc:
 release_region:
         pci_release_regions(pdev);
 disable_dev:
-        pci_clear_master(pdev);
         pci_disable_device(pdev);
         dev_err(&pdev->dev, "gdma probe failed: err = %d\n", err);
         return err;
@@ -1458,7 +1457,6 @@ static void mana_gd_remove(struct pci_dev *pdev)
         vfree(gc);
 
         pci_release_regions(pdev);
-        pci_clear_master(pdev);
         pci_disable_device(pdev);
 }
 
diff --git a/drivers/net/ethernet/microsoft/mana/mana_bpf.c b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
index 3caea631229c..23b1521c0df9 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_bpf.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
@@ -133,12 +133,6 @@ out:
         return act;
 }
 
-static unsigned int mana_xdp_fraglen(unsigned int len)
-{
-        return SKB_DATA_ALIGN(len) +
-               SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-}
-
 struct bpf_prog *mana_xdp_get(struct mana_port_context *apc)
 {
         ASSERT_RTNL();
@@ -179,17 +173,18 @@ static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
 {
         struct mana_port_context *apc = netdev_priv(ndev);
         struct bpf_prog *old_prog;
-        int buf_max;
+        struct gdma_context *gc;
+
+        gc = apc->ac->gdma_dev->gdma_context;
 
         old_prog = mana_xdp_get(apc);
 
         if (!old_prog && !prog)
                 return 0;
 
-        buf_max = XDP_PACKET_HEADROOM + mana_xdp_fraglen(ndev->mtu + ETH_HLEN);
-        if (prog && buf_max > PAGE_SIZE) {
-                netdev_err(ndev, "XDP: mtu:%u too large, buf_max:%u\n",
-                           ndev->mtu, buf_max);
+        if (prog && ndev->mtu > MANA_XDP_MTU_MAX) {
+                netdev_err(ndev, "XDP: mtu:%u too large, mtu_max:%lu\n",
+                           ndev->mtu, MANA_XDP_MTU_MAX);
                 NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large");
 
                 return -EOPNOTSUPP;
@@ -206,6 +201,11 @@ static int mana_xdp_set(struct net_device *ndev, struct bpf_prog *prog,
         if (apc->port_is_up)
                 mana_chn_setxdp(apc, prog);
 
+        if (prog)
+                ndev->max_mtu = MANA_XDP_MTU_MAX;
+        else
+                ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
+
         return 0;
 }
 
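The mana_bpf.c change above replaces the per-attach buf_max arithmetic with a precomputed MANA_XDP_MTU_MAX bound. A minimal standalone sketch of where such a bound comes from, assuming a 4 KiB page and an approximate skb_shared_info size (the driver's own constant may be derived differently): an XDP frame must fit the headroom, the cache-line-aligned Ethernet frame, and the trailing skb_shared_info within one page, which is the same condition the removed mana_xdp_fraglen() check used to test at attach time.

    /* xdp_mtu_bound.c: approximate the largest XDP-capable MTU.
     * PAGE_SIZE and SHINFO_SIZE are assumed host values, not the
     * kernel's definitions, so the printed bound is illustrative only.
     */
    #include <stdio.h>

    #define PAGE_SIZE           4096u
    #define ETH_HLEN            14u
    #define XDP_PACKET_HEADROOM 256u
    #define SMP_CACHE_BYTES     64u
    #define SHINFO_SIZE         320u /* ~sizeof(struct skb_shared_info) */
    #define SKB_DATA_ALIGN(x)   (((x) + SMP_CACHE_BYTES - 1) & \
                                 ~(SMP_CACHE_BYTES - 1))

    int main(void)
    {
        unsigned int mtu = 1500;

        /* Grow the MTU until headroom + aligned frame + shared_info
         * no longer fits in a single page.
         */
        while (XDP_PACKET_HEADROOM +
               SKB_DATA_ALIGN(mtu + 1 + ETH_HLEN) +
               SKB_DATA_ALIGN(SHINFO_SIZE) <= PAGE_SIZE)
                mtu++;

        printf("approximate XDP MTU max: %u\n", mtu);
        return 0;
    }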
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 6120f2b6684f..06d6292e09b3 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -156,6 +156,7 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
         struct mana_txq *txq;
         struct mana_cq *cq;
         int err, len;
+        u16 ihs;
 
         if (unlikely(!apc->port_is_up))
                 goto tx_drop;
@@ -166,6 +167,7 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
         txq = &apc->tx_qp[txq_idx].txq;
         gdma_sq = txq->gdma_sq;
         cq = &apc->tx_qp[txq_idx].tx_cq;
+        tx_stats = &txq->stats;
 
         pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
         pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
@@ -179,10 +181,17 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
         pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
 
-        if (pkt_fmt == MANA_SHORT_PKT_FMT)
+        if (pkt_fmt == MANA_SHORT_PKT_FMT) {
                 pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
-        else
+                u64_stats_update_begin(&tx_stats->syncp);
+                tx_stats->short_pkt_fmt++;
+                u64_stats_update_end(&tx_stats->syncp);
+        } else {
                 pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
+                u64_stats_update_begin(&tx_stats->syncp);
+                tx_stats->long_pkt_fmt++;
+                u64_stats_update_end(&tx_stats->syncp);
+        }
 
         pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
         pkg.wqe_req.flags = 0;
@@ -232,9 +241,35 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                                              &ipv6_hdr(skb)->daddr, 0,
                                              IPPROTO_TCP, 0);
                 }
+
+                if (skb->encapsulation) {
+                        ihs = skb_inner_tcp_all_headers(skb);
+                        u64_stats_update_begin(&tx_stats->syncp);
+                        tx_stats->tso_inner_packets++;
+                        tx_stats->tso_inner_bytes += skb->len - ihs;
+                        u64_stats_update_end(&tx_stats->syncp);
+                } else {
+                        if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+                                ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
+                        } else {
+                                ihs = skb_tcp_all_headers(skb);
+                                if (ipv6_has_hopopt_jumbo(skb))
+                                        ihs -= sizeof(struct hop_jumbo_hdr);
+                        }
+
+                        u64_stats_update_begin(&tx_stats->syncp);
+                        tx_stats->tso_packets++;
+                        tx_stats->tso_bytes += skb->len - ihs;
+                        u64_stats_update_end(&tx_stats->syncp);
+                }
+
         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                 csum_type = mana_checksum_info(skb);
 
+                u64_stats_update_begin(&tx_stats->syncp);
+                tx_stats->csum_partial++;
+                u64_stats_update_end(&tx_stats->syncp);
+
                 if (csum_type == IPPROTO_TCP) {
                         pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
                         pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
@@ -254,8 +289,12 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                 }
         }
 
-        if (mana_map_skb(skb, apc, &pkg))
+        if (mana_map_skb(skb, apc, &pkg)) {
+                u64_stats_update_begin(&tx_stats->syncp);
+                tx_stats->mana_map_err++;
+                u64_stats_update_end(&tx_stats->syncp);
                 goto free_sgl_ptr;
+        }
 
         skb_queue_tail(&txq->pending_skbs, skb);
 
@@ -388,6 +427,199 @@ static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
         return txq;
 }
 
+/* Release pre-allocated RX buffers */
+static void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc)
+{
+        struct device *dev;
+        int i;
+
+        dev = mpc->ac->gdma_dev->gdma_context->dev;
+
+        if (!mpc->rxbufs_pre)
+                goto out1;
+
+        if (!mpc->das_pre)
+                goto out2;
+
+        while (mpc->rxbpre_total) {
+                i = --mpc->rxbpre_total;
+                dma_unmap_single(dev, mpc->das_pre[i], mpc->rxbpre_datasize,
+                                 DMA_FROM_DEVICE);
+                put_page(virt_to_head_page(mpc->rxbufs_pre[i]));
+        }
+
+        kfree(mpc->das_pre);
+        mpc->das_pre = NULL;
+
+out2:
+        kfree(mpc->rxbufs_pre);
+        mpc->rxbufs_pre = NULL;
+
+out1:
+        mpc->rxbpre_datasize = 0;
+        mpc->rxbpre_alloc_size = 0;
+        mpc->rxbpre_headroom = 0;
+}
+
+/* Get a buffer from the pre-allocated RX buffers */
+static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da)
+{
+        struct net_device *ndev = rxq->ndev;
+        struct mana_port_context *mpc;
+        void *va;
+
+        mpc = netdev_priv(ndev);
+
+        if (!mpc->rxbufs_pre || !mpc->das_pre || !mpc->rxbpre_total) {
+                netdev_err(ndev, "No RX pre-allocated bufs\n");
+                return NULL;
+        }
+
+        /* Check sizes to catch unexpected coding error */
+        if (mpc->rxbpre_datasize != rxq->datasize) {
+                netdev_err(ndev, "rxbpre_datasize mismatch: %u: %u\n",
+                           mpc->rxbpre_datasize, rxq->datasize);
+                return NULL;
+        }
+
+        if (mpc->rxbpre_alloc_size != rxq->alloc_size) {
+                netdev_err(ndev, "rxbpre_alloc_size mismatch: %u: %u\n",
+                           mpc->rxbpre_alloc_size, rxq->alloc_size);
+                return NULL;
+        }
+
+        if (mpc->rxbpre_headroom != rxq->headroom) {
+                netdev_err(ndev, "rxbpre_headroom mismatch: %u: %u\n",
+                           mpc->rxbpre_headroom, rxq->headroom);
+                return NULL;
+        }
+
+        mpc->rxbpre_total--;
+
+        *da = mpc->das_pre[mpc->rxbpre_total];
+        va = mpc->rxbufs_pre[mpc->rxbpre_total];
+        mpc->rxbufs_pre[mpc->rxbpre_total] = NULL;
+
+        /* Deallocate the array after all buffers are gone */
+        if (!mpc->rxbpre_total)
+                mana_pre_dealloc_rxbufs(mpc);
+
+        return va;
+}
+
+/* Get RX buffer's data size, alloc size, XDP headroom based on MTU */
+static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
+                               u32 *headroom)
+{
+        if (mtu > MANA_XDP_MTU_MAX)
+                *headroom = 0; /* no support for XDP */
+        else
+                *headroom = XDP_PACKET_HEADROOM;
+
+        *alloc_size = mtu + MANA_RXBUF_PAD + *headroom;
+
+        *datasize = ALIGN(mtu + ETH_HLEN, MANA_RX_DATA_ALIGN);
+}
+
+static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu)
+{
+        struct device *dev;
+        struct page *page;
+        dma_addr_t da;
+        int num_rxb;
+        void *va;
+        int i;
+
+        mana_get_rxbuf_cfg(new_mtu, &mpc->rxbpre_datasize,
+                           &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom);
+
+        dev = mpc->ac->gdma_dev->gdma_context->dev;
+
+        num_rxb = mpc->num_queues * RX_BUFFERS_PER_QUEUE;
+
+        WARN(mpc->rxbufs_pre, "mana rxbufs_pre exists\n");
+        mpc->rxbufs_pre = kmalloc_array(num_rxb, sizeof(void *), GFP_KERNEL);
+        if (!mpc->rxbufs_pre)
+                goto error;
+
+        mpc->das_pre = kmalloc_array(num_rxb, sizeof(dma_addr_t), GFP_KERNEL);
+        if (!mpc->das_pre)
+                goto error;
+
+        mpc->rxbpre_total = 0;
+
+        for (i = 0; i < num_rxb; i++) {
+                if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
+                        va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
+                        if (!va)
+                                goto error;
+
+                        page = virt_to_head_page(va);
+                        /* Check if the frag falls back to single page */
+                        if (compound_order(page) <
+                            get_order(mpc->rxbpre_alloc_size)) {
+                                put_page(page);
+                                goto error;
+                        }
+                } else {
+                        page = dev_alloc_page();
+                        if (!page)
+                                goto error;
+
+                        va = page_to_virt(page);
+                }
+
+                da = dma_map_single(dev, va + mpc->rxbpre_headroom,
+                                    mpc->rxbpre_datasize, DMA_FROM_DEVICE);
+                if (dma_mapping_error(dev, da)) {
+                        put_page(virt_to_head_page(va));
+                        goto error;
+                }
+
+                mpc->rxbufs_pre[i] = va;
+                mpc->das_pre[i] = da;
+                mpc->rxbpre_total = i + 1;
+        }
+
+        return 0;
+
+error:
+        mana_pre_dealloc_rxbufs(mpc);
+        return -ENOMEM;
+}
+
+static int mana_change_mtu(struct net_device *ndev, int new_mtu)
+{
+        struct mana_port_context *mpc = netdev_priv(ndev);
+        unsigned int old_mtu = ndev->mtu;
+        int err;
+
+        /* Pre-allocate buffers to prevent failure in mana_attach later */
+        err = mana_pre_alloc_rxbufs(mpc, new_mtu);
+        if (err) {
+                netdev_err(ndev, "Insufficient memory for new MTU\n");
+                return err;
+        }
+
+        err = mana_detach(ndev, false);
+        if (err) {
+                netdev_err(ndev, "mana_detach failed: %d\n", err);
+                goto out;
+        }
+
+        ndev->mtu = new_mtu;
+
+        err = mana_attach(ndev);
+        if (err) {
+                netdev_err(ndev, "mana_attach failed: %d\n", err);
+                ndev->mtu = old_mtu;
+        }
+
+out:
+        mana_pre_dealloc_rxbufs(mpc);
+        return err;
+}
+
 static const struct net_device_ops mana_devops = {
         .ndo_open               = mana_open,
         .ndo_stop               = mana_close,
@@ -397,6 +629,7 @@ static const struct net_device_ops mana_devops = {
         .ndo_get_stats64        = mana_get_stats64,
         .ndo_bpf                = mana_bpf,
         .ndo_xdp_xmit           = mana_xdp_xmit,
+        .ndo_change_mtu         = mana_change_mtu,
 };
 
 static void mana_cleanup_port_context(struct mana_port_context *apc)
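mana_change_mtu() above reserves all RX buffers for the new MTU before detaching the port, so an allocation failure is reported while the device keeps running with its old configuration. A simplified standalone sketch of that reserve-then-commit pattern follows; the names are hypothetical, and the real function additionally restores ndev->mtu if mana_attach() fails and frees any leftover pre-allocation on every exit path.

    /* change_mtu_sketch.c: reserve resources for the new configuration
     * first, commit only once nothing can fail. Hypothetical names; only
     * the ordering mirrors mana_change_mtu().
     */
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct port { int mtu; void *rx_bufs; };

    static void *prealloc_rx(int mtu)
    {
        return malloc((size_t)mtu + 256); /* headroom stand-in */
    }

    static int change_mtu(struct port *p, int new_mtu)
    {
        void *fresh = prealloc_rx(new_mtu); /* 1: reserve first */

        if (!fresh)
            return -ENOMEM;                 /* old config untouched */

        free(p->rx_bufs);                   /* 2: tear down old */
        p->rx_bufs = fresh;                 /* 3: commit */
        p->mtu = new_mtu;
        return 0;
    }

    int main(void)
    {
        struct port p = { .mtu = 1500, .rx_bufs = prealloc_rx(1500) };

        if (change_mtu(&p, 9000) == 0)
            printf("mtu is now %d\n", p.mtu);
        free(p.rx_bufs);
        return 0;
    }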
@@ -586,6 +819,9 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
 
         mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG, sizeof(req),
                              sizeof(resp));
+
+        req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
+
         req.proto_major_ver = proto_major_ver;
         req.proto_minor_ver = proto_minor_ver;
         req.proto_micro_ver = proto_micro_ver;
@@ -608,6 +844,11 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
 
         *max_num_vports = resp.max_num_vports;
 
+        if (resp.hdr.response.msg_version == GDMA_MESSAGE_V2)
+                gc->adapter_mtu = resp.adapter_mtu;
+        else
+                gc->adapter_mtu = ETH_FRAME_LEN;
+
         return 0;
 }
 
@@ -1038,6 +1279,8 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
         if (comp_read < 1)
                 return;
 
+        apc->eth_stats.tx_cqes = comp_read;
+
         for (i = 0; i < comp_read; i++) {
                 struct mana_tx_comp_oob *cqe_oob;
 
@@ -1064,6 +1307,7 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
                 case CQE_TX_VLAN_TAGGING_VIOLATION:
                         WARN_ONCE(1, "TX: CQE error %d: ignored.\n",
                                   cqe_oob->cqe_hdr.cqe_type);
+                        apc->eth_stats.tx_cqe_err++;
                         break;
 
                 default:
@@ -1072,6 +1316,7 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
                          */
                         WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n",
                                   cqe_oob->cqe_hdr.cqe_type);
+                        apc->eth_stats.tx_cqe_unknown_type++;
                         return;
                 }
 
@@ -1118,6 +1363,8 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
                 WARN_ON_ONCE(1);
 
         cq->work_done = pkt_transmitted;
+
+        apc->eth_stats.tx_cqes -= pkt_transmitted;
 }
 
 static void mana_post_pkt_rxq(struct mana_rxq *rxq)
@@ -1140,10 +1387,10 @@ static void mana_post_pkt_rxq(struct mana_rxq *rxq)
         WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
 }
 
-static struct sk_buff *mana_build_skb(void *buf_va, uint pkt_len,
-                                      struct xdp_buff *xdp)
+static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
+                                      uint pkt_len, struct xdp_buff *xdp)
 {
-        struct sk_buff *skb = build_skb(buf_va, PAGE_SIZE);
+        struct sk_buff *skb = napi_build_skb(buf_va, rxq->alloc_size);
 
         if (!skb)
                 return NULL;
@@ -1151,11 +1398,12 @@ static struct sk_buff *mana_build_skb(void *buf_va, uint pkt_len,
         if (xdp->data_hard_start) {
                 skb_reserve(skb, xdp->data - xdp->data_hard_start);
                 skb_put(skb, xdp->data_end - xdp->data);
-        } else {
-                skb_reserve(skb, XDP_PACKET_HEADROOM);
-                skb_put(skb, pkt_len);
+                return skb;
         }
 
+        skb_reserve(skb, rxq->headroom);
+        skb_put(skb, pkt_len);
+
         return skb;
 }
 
@@ -1188,7 +1436,7 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe,
         if (act != XDP_PASS && act != XDP_TX)
                 goto drop_xdp;
 
-        skb = mana_build_skb(buf_va, pkt_len, &xdp);
+        skb = mana_build_skb(rxq, buf_va, pkt_len, &xdp);
 
         if (!skb)
                 goto drop;
@@ -1237,14 +1485,77 @@ drop_xdp:
         u64_stats_update_end(&rx_stats->syncp);
 
 drop:
-        WARN_ON_ONCE(rxq->xdp_save_page);
-        rxq->xdp_save_page = virt_to_page(buf_va);
+        WARN_ON_ONCE(rxq->xdp_save_va);
+        /* Save for reuse */
+        rxq->xdp_save_va = buf_va;
 
         ++ndev->stats.rx_dropped;
 
         return;
 }
 
+static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
+                             dma_addr_t *da, bool is_napi)
+{
+        struct page *page;
+        void *va;
+
+        /* Reuse XDP dropped page if available */
+        if (rxq->xdp_save_va) {
+                va = rxq->xdp_save_va;
+                rxq->xdp_save_va = NULL;
+        } else if (rxq->alloc_size > PAGE_SIZE) {
+                if (is_napi)
+                        va = napi_alloc_frag(rxq->alloc_size);
+                else
+                        va = netdev_alloc_frag(rxq->alloc_size);
+
+                if (!va)
+                        return NULL;
+
+                page = virt_to_head_page(va);
+                /* Check if the frag falls back to single page */
+                if (compound_order(page) < get_order(rxq->alloc_size)) {
+                        put_page(page);
+                        return NULL;
+                }
+        } else {
+                page = dev_alloc_page();
+                if (!page)
+                        return NULL;
+
+                va = page_to_virt(page);
+        }
+
+        *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
+                             DMA_FROM_DEVICE);
+        if (dma_mapping_error(dev, *da)) {
+                put_page(virt_to_head_page(va));
+                return NULL;
+        }
+
+        return va;
+}
+
+/* Allocate frag for rx buffer, and save the old buf */
+static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
+                               struct mana_recv_buf_oob *rxoob, void **old_buf)
+{
+        dma_addr_t da;
+        void *va;
+
+        va = mana_get_rxfrag(rxq, dev, &da, true);
+        if (!va)
+                return;
+
+        dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
+                         DMA_FROM_DEVICE);
+        *old_buf = rxoob->buf_va;
+
+        rxoob->buf_va = va;
+        rxoob->sgl[0].address = da;
+}
+
 static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
                                 struct gdma_comp *cqe)
 {
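mana_refill_rx_oob() above only unmaps and hands up the just-received buffer after a replacement has been allocated and DMA-mapped; if allocation fails, the ring slot keeps its current buffer and this one packet is dropped rather than the ring permanently losing a slot. A standalone sketch of that refill discipline, with plain malloc standing in for the frag allocator and DMA mapping (names hypothetical):

    /* rx_refill_sketch.c: never give up a ring buffer until its
     * replacement exists.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct ring_slot { void *buf; };

    /* Returns the received buffer to deliver, or NULL to drop. */
    static void *refill_slot(struct ring_slot *slot, size_t size)
    {
        void *replacement = malloc(size);
        void *received;

        if (!replacement)
            return NULL;           /* slot keeps its buffer; drop pkt */

        received = slot->buf;      /* hand the filled buffer upward */
        slot->buf = replacement;   /* slot is immediately reusable */
        return received;
    }

    int main(void)
    {
        struct ring_slot slot = { .buf = malloc(2048) };
        void *pkt = refill_slot(&slot, 2048);

        printf("%s\n", pkt ? "deliver packet" : "drop packet");
        free(pkt);
        free(slot.buf);
        return 0;
    }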
@@ -1252,11 +1563,12 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
         struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
         struct net_device *ndev = rxq->ndev;
         struct mana_recv_buf_oob *rxbuf_oob;
+        struct mana_port_context *apc;
         struct device *dev = gc->dev;
-        void *new_buf, *old_buf;
-        struct page *new_page;
+        void *old_buf = NULL;
         u32 curr, pktlen;
-        dma_addr_t da;
+
+        apc = netdev_priv(ndev);
 
         switch (oob->cqe_hdr.cqe_type) {
         case CQE_RX_OKAY:
@@ -1270,6 +1582,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
 
         case CQE_RX_COALESCED_4:
                 netdev_err(ndev, "RX coalescing is unsupported\n");
+                apc->eth_stats.rx_coalesced_err++;
                 return;
 
         case CQE_RX_OBJECT_FENCE:
@@ -1279,6 +1592,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
         default:
                 netdev_err(ndev, "Unknown RX CQE type = %d\n",
                            oob->cqe_hdr.cqe_type);
+                apc->eth_stats.rx_cqe_unknown_type++;
                 return;
         }
 
@@ -1295,40 +1609,11 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
         rxbuf_oob = &rxq->rx_oobs[curr];
         WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
 
-        /* Reuse XDP dropped page if available */
-        if (rxq->xdp_save_page) {
-                new_page = rxq->xdp_save_page;
-                rxq->xdp_save_page = NULL;
-        } else {
-                new_page = alloc_page(GFP_ATOMIC);
-        }
-
-        if (new_page) {
-                da = dma_map_page(dev, new_page, XDP_PACKET_HEADROOM, rxq->datasize,
-                                  DMA_FROM_DEVICE);
-
-                if (dma_mapping_error(dev, da)) {
-                        __free_page(new_page);
-                        new_page = NULL;
-                }
-        }
-
-        new_buf = new_page ? page_to_virt(new_page) : NULL;
-
-        if (new_buf) {
-                dma_unmap_page(dev, rxbuf_oob->buf_dma_addr, rxq->datasize,
-                               DMA_FROM_DEVICE);
-
-                old_buf = rxbuf_oob->buf_va;
-
-                /* refresh the rxbuf_oob with the new page */
-                rxbuf_oob->buf_va = new_buf;
-                rxbuf_oob->buf_dma_addr = da;
-                rxbuf_oob->sgl[0].address = rxbuf_oob->buf_dma_addr;
-        } else {
-                old_buf = NULL; /* drop the packet if no memory */
-        }
+        mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf);
 
+        /* Unsuccessful refill will have old_buf == NULL.
+         * In this case, mana_rx_skb() will drop the packet.
+         */
         mana_rx_skb(old_buf, oob, rxq);
 
 drop:
@@ -1341,11 +1626,15 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
 {
         struct gdma_comp *comp = cq->gdma_comp_buf;
         struct mana_rxq *rxq = cq->rxq;
+        struct mana_port_context *apc;
         int comp_read, i;
 
+        apc = netdev_priv(rxq->ndev);
+
         comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
         WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
 
+        apc->eth_stats.rx_cqes = comp_read;
         rxq->xdp_flush = false;
 
         for (i = 0; i < comp_read; i++) {
@@ -1357,6 +1646,8 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
                         return;
 
                 mana_process_rx_cqe(rxq, cq, &comp[i]);
+
+                apc->eth_stats.rx_cqes--;
         }
 
         if (rxq->xdp_flush)
@@ -1603,8 +1894,8 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
 
         mana_deinit_cq(apc, &rxq->rx_cq);
 
-        if (rxq->xdp_save_page)
-                __free_page(rxq->xdp_save_page);
+        if (rxq->xdp_save_va)
+                put_page(virt_to_head_page(rxq->xdp_save_va));
 
         for (i = 0; i < rxq->num_rx_buf; i++) {
                 rx_oob = &rxq->rx_oobs[i];
@@ -1612,10 +1903,10 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
                 if (!rx_oob->buf_va)
                         continue;
 
-                dma_unmap_page(dev, rx_oob->buf_dma_addr, rxq->datasize,
-                               DMA_FROM_DEVICE);
+                dma_unmap_single(dev, rx_oob->sgl[0].address,
+                                 rx_oob->sgl[0].size, DMA_FROM_DEVICE);
 
-                free_page((unsigned long)rx_oob->buf_va);
+                put_page(virt_to_head_page(rx_oob->buf_va));
                 rx_oob->buf_va = NULL;
         }
 
@@ -1625,6 +1916,30 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
 
         kfree(rxq);
 }
 
+static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
+                            struct mana_rxq *rxq, struct device *dev)
+{
+        struct mana_port_context *mpc = netdev_priv(rxq->ndev);
+        dma_addr_t da;
+        void *va;
+
+        if (mpc->rxbufs_pre)
+                va = mana_get_rxbuf_pre(rxq, &da);
+        else
+                va = mana_get_rxfrag(rxq, dev, &da, false);
+
+        if (!va)
+                return -ENOMEM;
+
+        rx_oob->buf_va = va;
+
+        rx_oob->sgl[0].address = da;
+        rx_oob->sgl[0].size = rxq->datasize;
+        rx_oob->sgl[0].mem_key = mem_key;
+
+        return 0;
+}
+
 #define MANA_WQE_HEADER_SIZE 16
 #define MANA_WQE_SGE_SIZE 16
 
@@ -1634,11 +1949,10 @@ static int mana_alloc_rx_wqe(struct mana_port_context *apc,
         struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
         struct mana_recv_buf_oob *rx_oob;
         struct device *dev = gc->dev;
-        struct page *page;
-        dma_addr_t da;
         u32 buf_idx;
+        int ret;
 
-        WARN_ON(rxq->datasize == 0 || rxq->datasize > PAGE_SIZE);
+        WARN_ON(rxq->datasize == 0);
 
         *rxq_size = 0;
         *cq_size = 0;
@@ -1647,25 +1961,12 @@ static int mana_alloc_rx_wqe(struct mana_port_context *apc,
                 rx_oob = &rxq->rx_oobs[buf_idx];
                 memset(rx_oob, 0, sizeof(*rx_oob));
 
-                page = alloc_page(GFP_KERNEL);
-                if (!page)
-                        return -ENOMEM;
-
-                da = dma_map_page(dev, page, XDP_PACKET_HEADROOM, rxq->datasize,
-                                  DMA_FROM_DEVICE);
-
-                if (dma_mapping_error(dev, da)) {
-                        __free_page(page);
-                        return -ENOMEM;
-                }
-
-                rx_oob->buf_va = page_to_virt(page);
-                rx_oob->buf_dma_addr = da;
-
                 rx_oob->num_sge = 1;
-                rx_oob->sgl[0].address = rx_oob->buf_dma_addr;
-                rx_oob->sgl[0].size = rxq->datasize;
-                rx_oob->sgl[0].mem_key = apc->ac->gdma_dev->gpa_mkey;
+
+                ret = mana_fill_rx_oob(rx_oob, apc->ac->gdma_dev->gpa_mkey, rxq,
+                                       dev);
+                if (ret)
+                        return ret;
 
                 rx_oob->wqe_req.sgl = rx_oob->sgl;
                 rx_oob->wqe_req.num_sge = rx_oob->num_sge;
@@ -1724,9 +2025,11 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
         rxq->ndev = ndev;
         rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
         rxq->rxq_idx = rxq_idx;
-        rxq->datasize = ALIGN(MAX_FRAME_SIZE, 64);
         rxq->rxobj = INVALID_MANA_HANDLE;
 
+        mana_get_rxbuf_cfg(ndev->mtu, &rxq->datasize, &rxq->alloc_size,
+                           &rxq->headroom);
+
         err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
         if (err)
                 goto out;
@@ -2138,8 +2441,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
         ndev->netdev_ops = &mana_devops;
         ndev->ethtool_ops = &mana_ethtool_ops;
         ndev->mtu = ETH_DATA_LEN;
-        ndev->max_mtu = ndev->mtu;
-        ndev->min_mtu = ndev->mtu;
+        ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
+        ndev->min_mtu = ETH_MIN_MTU;
         ndev->needed_headroom = MANA_HEADROOM;
         ndev->dev_port = port_idx;
         SET_NETDEV_DEV(ndev, gc->dev);
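The mana_probe_port() hunk above derives the advertised MTU ceiling from the adapter_mtu obtained by the GDMA_MESSAGE_V2 device-config query added earlier in this patch. A worked example with a made-up adapter_mtu (only the ETH_* constants are standard):

    /* mtu_range_example.c: adapter_mtu counts the whole L2 frame, so the
     * configurable L3 MTU is adapter_mtu - ETH_HLEN. The 4096 below is a
     * hypothetical value, not something the hardware is known to report.
     */
    #include <stdio.h>

    #define ETH_HLEN      14   /* Ethernet header, not counted in the MTU */
    #define ETH_FRAME_LEN 1514 /* v1 fallback frame size */

    int main(void)
    {
        int adapter_mtu = 4096; /* hypothetical v2 response value */

        printf("max_mtu (v2) = %d\n", adapter_mtu - ETH_HLEN);   /* 4082 */
        printf("max_mtu (v1) = %d\n", ETH_FRAME_LEN - ETH_HLEN); /* 1500 */
        return 0;
    }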
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index 5b776a33a817..a64c81410dc1 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -13,6 +13,15 @@ static const struct {
 } mana_eth_stats[] = {
         {"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
         {"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
+        {"tx_cqes", offsetof(struct mana_ethtool_stats, tx_cqes)},
+        {"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
+        {"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
+                                         tx_cqe_unknown_type)},
+        {"rx_cqes", offsetof(struct mana_ethtool_stats, rx_cqes)},
+        {"rx_coalesced_err", offsetof(struct mana_ethtool_stats,
+                                      rx_coalesced_err)},
+        {"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
+                                         rx_cqe_unknown_type)},
 };
 
 static int mana_get_sset_count(struct net_device *ndev, int stringset)
@@ -23,7 +32,8 @@ static int mana_get_sset_count(struct net_device *ndev, int stringset)
         if (stringset != ETH_SS_STATS)
                 return -EINVAL;
 
-        return ARRAY_SIZE(mana_eth_stats) + num_queues * 8;
+        return ARRAY_SIZE(mana_eth_stats) + num_queues *
+                                (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
 }
 
 static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
@@ -61,6 +71,22 @@ static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
                 p += ETH_GSTRING_LEN;
                 sprintf(p, "tx_%d_xdp_xmit", i);
                 p += ETH_GSTRING_LEN;
+                sprintf(p, "tx_%d_tso_packets", i);
+                p += ETH_GSTRING_LEN;
+                sprintf(p, "tx_%d_tso_bytes", i);
+                p += ETH_GSTRING_LEN;
+                sprintf(p, "tx_%d_tso_inner_packets", i);
+                p += ETH_GSTRING_LEN;
+                sprintf(p, "tx_%d_tso_inner_bytes", i);
+                p += ETH_GSTRING_LEN;
+                sprintf(p, "tx_%d_long_pkt_fmt", i);
+                p += ETH_GSTRING_LEN;
+                sprintf(p, "tx_%d_short_pkt_fmt", i);
+                p += ETH_GSTRING_LEN;
+                sprintf(p, "tx_%d_csum_partial", i);
+                p += ETH_GSTRING_LEN;
+                sprintf(p, "tx_%d_mana_map_err", i);
+                p += ETH_GSTRING_LEN;
         }
 }
 
@@ -78,6 +104,14 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
         u64 xdp_xmit;
         u64 xdp_drop;
         u64 xdp_tx;
+        u64 tso_packets;
+        u64 tso_bytes;
+        u64 tso_inner_packets;
+        u64 tso_inner_bytes;
+        u64 long_pkt_fmt;
+        u64 short_pkt_fmt;
+        u64 csum_partial;
+        u64 mana_map_err;
         int q, i = 0;
 
         if (!apc->port_is_up)
@@ -113,11 +147,27 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
                         packets = tx_stats->packets;
                         bytes = tx_stats->bytes;
                         xdp_xmit = tx_stats->xdp_xmit;
+                        tso_packets = tx_stats->tso_packets;
+                        tso_bytes = tx_stats->tso_bytes;
+                        tso_inner_packets = tx_stats->tso_inner_packets;
+                        tso_inner_bytes = tx_stats->tso_inner_bytes;
+                        long_pkt_fmt = tx_stats->long_pkt_fmt;
+                        short_pkt_fmt = tx_stats->short_pkt_fmt;
+                        csum_partial = tx_stats->csum_partial;
+                        mana_map_err = tx_stats->mana_map_err;
                 } while (u64_stats_fetch_retry(&tx_stats->syncp, start));
 
                 data[i++] = packets;
                 data[i++] = bytes;
                 data[i++] = xdp_xmit;
+                data[i++] = tso_packets;
+                data[i++] = tso_bytes;
+                data[i++] = tso_inner_packets;
+                data[i++] = tso_inner_bytes;
+                data[i++] = long_pkt_fmt;
+                data[i++] = short_pkt_fmt;
+                data[i++] = csum_partial;
+                data[i++] = mana_map_err;
         }
 }
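The snapshot loop in mana_get_ethtool_stats() reads each queue's counters under u64_stats_fetch_begin()/u64_stats_fetch_retry() so it never reports a torn pair (for example, tso_packets from before an update and tso_bytes from after it). A rough userspace analogue of that seqcount pattern, simplified to a single writer; a real concurrent reader would additionally need memory fences or atomic data fields:

    /* seqcount_sketch.c: the writer bumps a sequence number around each
     * update, the reader retries until it sees the same even value
     * before and after its copy.
     */
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    struct txq_stats {
        atomic_uint seq; /* even: stable, odd: update in progress */
        uint64_t packets;
        uint64_t bytes;
    };

    /* Writer side, mirroring u64_stats_update_begin()/_end() */
    static void stats_update(struct txq_stats *s, uint64_t len)
    {
        atomic_fetch_add(&s->seq, 1); /* begin: seq becomes odd */
        s->packets += 1;
        s->bytes += len;
        atomic_fetch_add(&s->seq, 1); /* end: seq even again */
    }

    /* Reader side, mirroring the do/while fetch-retry loop */
    static void stats_read(struct txq_stats *s, uint64_t *p, uint64_t *b)
    {
        unsigned int start;

        do {
            do {
                start = atomic_load(&s->seq);
            } while (start & 1);  /* writer active, wait */
            *p = s->packets;
            *b = s->bytes;
        } while (atomic_load(&s->seq) != start); /* torn read, retry */
    }

    int main(void)
    {
        struct txq_stats s = {0};
        uint64_t p, b;

        stats_update(&s, 1514);
        stats_read(&s, &p, &b);
        printf("packets=%llu bytes=%llu\n",
               (unsigned long long)p, (unsigned long long)b);
        return 0;
    }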