Diffstat (limited to 'drivers/net/ethernet/intel/idpf/idpf_txrx.c')
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.c  205
1 file changed, 73 insertions(+), 132 deletions(-)
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 585c3dadd9bf..76479c4f3fec 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -2,10 +2,19 @@
/* Copyright (C) 2023 Intel Corporation */
#include <net/libeth/rx.h>
+#include <net/libeth/tx.h>
#include "idpf.h"
#include "idpf_virtchnl.h"
+struct idpf_tx_stash {
+ struct hlist_node hlist;
+ struct libeth_sqe buf;
+};
+
+#define idpf_tx_buf_compl_tag(buf) (*(int *)&(buf)->priv)
+LIBETH_SQE_CHECK_PRIV(int);
+
static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
unsigned int count);
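
[Editor's note] The new idpf_tx_buf_compl_tag() macro reuses the opaque priv word of the generic libeth_sqe to carry the splitq completion tag, and LIBETH_SQE_CHECK_PRIV() asserts at build time that the chosen type fits. A minimal userspace model of the same trick, with simplified stand-in types rather than the real libeth_sqe layout:

#include <assert.h>
#include <stdio.h>

struct sqe {
	void *skb;
	unsigned long priv;	/* opaque per-driver scratch word */
};

/* Reinterpret the scratch word as the driver's completion tag. */
#define tx_buf_compl_tag(buf)	(*(int *)&(buf)->priv)
static_assert(sizeof(int) <= sizeof(unsigned long),
	      "completion tag must fit in the priv word");

int main(void)
{
	struct sqe buf = { 0 };

	tx_buf_compl_tag(&buf) = 42;			/* set on the xmit path */
	printf("tag=%d\n", tx_buf_compl_tag(&buf));	/* read on the clean path */
	return 0;
}
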
@@ -61,40 +70,17 @@ void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
}
/**
- * idpf_tx_buf_rel - Release a Tx buffer
- * @tx_q: the queue that owns the buffer
- * @tx_buf: the buffer to free
- */
-static void idpf_tx_buf_rel(struct idpf_tx_queue *tx_q,
- struct idpf_tx_buf *tx_buf)
-{
- if (tx_buf->skb) {
- if (dma_unmap_len(tx_buf, len))
- dma_unmap_single(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- dev_kfree_skb_any(tx_buf->skb);
- } else if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_page(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- }
-
- tx_buf->next_to_watch = NULL;
- tx_buf->skb = NULL;
- tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
- dma_unmap_len_set(tx_buf, len, 0);
-}
-
-/**
* idpf_tx_buf_rel_all - Free any empty Tx buffers
* @txq: queue to be cleaned
*/
static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
{
+ struct libeth_sq_napi_stats ss = { };
struct idpf_buf_lifo *buf_stack;
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = &ss,
+ };
u16 i;
/* Buffers already cleared, nothing to do */
@@ -103,7 +89,7 @@ static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
/* Free all the Tx buffer sk_buffs */
for (i = 0; i < txq->desc_count; i++)
- idpf_tx_buf_rel(txq, &txq->tx_buf[i]);
+ libeth_tx_complete(&txq->tx_buf[i], &cp);
kfree(txq->tx_buf);
txq->tx_buf = NULL;
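
[Editor's note] Throughout this patch the open-coded unmap/free paths collapse into libeth_tx_complete() driven by a libeth_cq_pp context (.dev for DMA unmapping, .ss for stats, .napi for the skb-free mode). Below is a userspace sketch of the type-driven release this enables, inferred from how the diff uses it; the real libeth_tx_complete() lives in net/libeth and differs in detail:

#include <stdio.h>

/* Mirrors only the ordering this diff relies on: EMPTY and CTX sort
 * below the buffer-carrying types, so "type <= CTX" means "nothing
 * mapped in this slot". */
enum sqe_type { SQE_EMPTY, SQE_CTX, SQE_FRAG, SQE_SKB };

struct sqe {
	enum sqe_type type;
	unsigned int len;		/* stand-in for dma_unmap_len() */
	unsigned int packets, bytes;	/* only valid on the SKB head */
};

struct napi_stats {
	unsigned int packets, bytes;	/* models libeth_sq_napi_stats */
};

static void tx_complete(struct sqe *sqe, struct napi_stats *ss)
{
	switch (sqe->type) {
	case SQE_EMPTY:
	case SQE_CTX:
		break;				/* no buffer to release */
	case SQE_FRAG:
		printf("unmap page, %u bytes\n", sqe->len);
		break;
	case SQE_SKB:
		printf("unmap single, %u bytes, free skb\n", sqe->len);
		ss->packets += sqe->packets;	/* head buffer carries the stats */
		ss->bytes += sqe->bytes;
		break;
	}
	sqe->type = SQE_EMPTY;			/* slot is reusable afterwards */
}

int main(void)
{
	struct napi_stats ss = { 0 };
	struct sqe head = { .type = SQE_SKB, .len = 256,
			    .packets = 1, .bytes = 256 };

	tx_complete(&head, &ss);
	printf("packets=%u bytes=%u\n", ss.packets, ss.bytes);
	return 0;
}
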
@@ -203,10 +189,6 @@ static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
if (!tx_q->tx_buf)
return -ENOMEM;
- /* Initialize tx_bufs with invalid completion tags */
- for (i = 0; i < tx_q->desc_count; i++)
- tx_q->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
-
if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
return 0;
@@ -1656,37 +1638,6 @@ static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q)
}
/**
- * idpf_tx_splitq_clean_hdr - Clean TX buffer resources for header portion of
- * packet
- * @tx_q: tx queue to clean buffer from
- * @tx_buf: buffer to be cleaned
- * @cleaned: pointer to stats struct to track cleaned packets/bytes
- * @napi_budget: Used to determine if we are in netpoll
- */
-static void idpf_tx_splitq_clean_hdr(struct idpf_tx_queue *tx_q,
- struct idpf_tx_buf *tx_buf,
- struct idpf_cleaned_stats *cleaned,
- int napi_budget)
-{
- napi_consume_skb(tx_buf->skb, napi_budget);
-
- if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_single(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
-
- dma_unmap_len_set(tx_buf, len, 0);
- }
-
- /* clear tx_buf data */
- tx_buf->skb = NULL;
-
- cleaned->bytes += tx_buf->bytecount;
- cleaned->packets += tx_buf->gso_segs;
-}
-
-/**
* idpf_tx_clean_stashed_bufs - clean bufs that were stored for
* out of order completions
* @txq: queue to clean
@@ -1696,28 +1647,25 @@ static void idpf_tx_splitq_clean_hdr(struct idpf_tx_queue *tx_q,
*/
static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq,
u16 compl_tag,
- struct idpf_cleaned_stats *cleaned,
+ struct libeth_sq_napi_stats *cleaned,
int budget)
{
struct idpf_tx_stash *stash;
struct hlist_node *tmp_buf;
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = cleaned,
+ .napi = budget,
+ };
/* Buffer completion */
hash_for_each_possible_safe(txq->stash->sched_buf_hash, stash, tmp_buf,
hlist, compl_tag) {
- if (unlikely(stash->buf.compl_tag != (int)compl_tag))
+ if (unlikely(idpf_tx_buf_compl_tag(&stash->buf) !=
+ (int)compl_tag))
continue;
- if (stash->buf.skb) {
- idpf_tx_splitq_clean_hdr(txq, &stash->buf, cleaned,
- budget);
- } else if (dma_unmap_len(&stash->buf, len)) {
- dma_unmap_page(txq->dev,
- dma_unmap_addr(&stash->buf, dma),
- dma_unmap_len(&stash->buf, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(&stash->buf, len, 0);
- }
+ libeth_tx_complete(&stash->buf, &cp);
/* Push shadow buf back onto stack */
idpf_buf_lifo_push(&txq->stash->buf_stack, stash);
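
[Editor's note] For out-of-order completions, stashed buffers live in a hash table keyed by completion tag, so the walk must tolerate colliding entries and removal while iterating (what hash_for_each_possible_safe() provides). A toy chained-hash model of that walk, with illustrative names in place of the kernel hashtable API:

#include <stdlib.h>

#define NBUCKETS 16

struct stash {
	struct stash *next;
	int tag;
};

static struct stash *buckets[NBUCKETS];

/* Release every stashed entry whose tag matches, keeping the rest. */
static void clean_stashed(int tag)
{
	struct stash **pp = &buckets[(unsigned int)tag % NBUCKETS];

	while (*pp) {
		struct stash *s = *pp;

		if (s->tag != tag) {	/* hash collision: leave it alone */
			pp = &s->next;
			continue;
		}
		*pp = s->next;		/* unlink while iterating */
		free(s);		/* the driver instead recycles via buf_lifo_push() */
	}
}

int main(void)
{
	struct stash *a = calloc(1, sizeof(*a));
	struct stash *b = calloc(1, sizeof(*b));

	a->tag = 3;
	a->next = b;
	b->tag = 3 + NBUCKETS;	/* same bucket, different tag */
	buckets[3] = a;

	clean_stashed(3);	/* frees a, keeps b */
	return 0;
}
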
@@ -1737,8 +1685,7 @@ static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq,
{
struct idpf_tx_stash *stash;
- if (unlikely(!dma_unmap_addr(tx_buf, dma) &&
- !dma_unmap_len(tx_buf, len)))
+ if (unlikely(tx_buf->type <= LIBETH_SQE_CTX))
return 0;
stash = idpf_buf_lifo_pop(&txq->stash->buf_stack);
@@ -1751,20 +1698,18 @@ static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq,
/* Store buffer params in shadow buffer */
stash->buf.skb = tx_buf->skb;
- stash->buf.bytecount = tx_buf->bytecount;
- stash->buf.gso_segs = tx_buf->gso_segs;
+ stash->buf.bytes = tx_buf->bytes;
+ stash->buf.packets = tx_buf->packets;
+ stash->buf.type = tx_buf->type;
dma_unmap_addr_set(&stash->buf, dma, dma_unmap_addr(tx_buf, dma));
dma_unmap_len_set(&stash->buf, len, dma_unmap_len(tx_buf, len));
- stash->buf.compl_tag = tx_buf->compl_tag;
+ idpf_tx_buf_compl_tag(&stash->buf) = idpf_tx_buf_compl_tag(tx_buf);
/* Add buffer to buf_hash table to be freed later */
hash_add(txq->stash->sched_buf_hash, &stash->hlist,
- stash->buf.compl_tag);
-
- memset(tx_buf, 0, sizeof(struct idpf_tx_buf));
+ idpf_tx_buf_compl_tag(&stash->buf));
- /* Reinitialize buf_id portion of tag */
- tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+ tx_buf->type = LIBETH_SQE_EMPTY;
return 0;
}
@@ -1800,12 +1745,17 @@ do { \
*/
static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
int napi_budget,
- struct idpf_cleaned_stats *cleaned,
+ struct libeth_sq_napi_stats *cleaned,
bool descs_only)
{
union idpf_tx_flex_desc *next_pending_desc = NULL;
union idpf_tx_flex_desc *tx_desc;
s16 ntc = tx_q->next_to_clean;
+ struct libeth_cq_pp cp = {
+ .dev = tx_q->dev,
+ .ss = cleaned,
+ .napi = napi_budget,
+ };
struct idpf_tx_buf *tx_buf;
tx_desc = &tx_q->flex_tx[ntc];
@@ -1821,13 +1771,10 @@ static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
* invalid completion tag since no buffer was used. We can
* skip this descriptor since there is no buffer to clean.
*/
- if (unlikely(tx_buf->compl_tag == IDPF_SPLITQ_TX_INVAL_COMPL_TAG))
+ if (tx_buf->type <= LIBETH_SQE_CTX)
goto fetch_next_txq_desc;
- eop_desc = (union idpf_tx_flex_desc *)tx_buf->next_to_watch;
-
- /* clear next_to_watch to prevent false hangs */
- tx_buf->next_to_watch = NULL;
+ eop_desc = &tx_q->flex_tx[tx_buf->rs_idx];
if (descs_only) {
if (idpf_stash_flow_sch_buffers(tx_q, tx_buf))
@@ -1844,8 +1791,7 @@ static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
}
}
} else {
- idpf_tx_splitq_clean_hdr(tx_q, tx_buf, cleaned,
- napi_budget);
+ libeth_tx_complete(tx_buf, &cp);
/* unmap remaining buffers */
while (tx_desc != eop_desc) {
@@ -1853,13 +1799,7 @@ static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
tx_desc, tx_buf);
/* unmap any remaining paged data */
- if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_page(tx_q->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
- }
+ libeth_tx_complete(tx_buf, &cp);
}
}
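
[Editor's note] Dropping next_to_watch in favour of rs_idx means the buffer no longer caches a descriptor pointer that must be NULLed to prevent false hang detection; it stores the ring index of the packet's last (RS/EOP) descriptor and the clean path recomputes the pointer. A sketch under illustrative types:

/* Illustrative types; only the index-instead-of-pointer idea matters. */
struct flex_desc {
	unsigned long long qw[2];	/* opaque hardware descriptor */
};

struct tx_buf {
	unsigned short rs_idx;		/* ring index of the RS/EOP descriptor */
};

static struct flex_desc tx_ring[512];

/* Clean side: rebuild the EOP pointer from the stored index, i.e.
 * the new "&tx_q->flex_tx[tx_buf->rs_idx]" above. */
static struct flex_desc *eop_desc(const struct tx_buf *buf)
{
	return &tx_ring[buf->rs_idx];
}
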
@@ -1895,30 +1835,26 @@ do { \
* this completion tag.
*/
static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag,
- struct idpf_cleaned_stats *cleaned,
+ struct libeth_sq_napi_stats *cleaned,
int budget)
{
u16 idx = compl_tag & txq->compl_tag_bufid_m;
struct idpf_tx_buf *tx_buf = NULL;
u16 ntc = txq->next_to_clean;
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = cleaned,
+ .napi = budget,
+ };
u16 num_descs_cleaned = 0;
u16 orig_idx = idx;
tx_buf = &txq->tx_buf[idx];
+ if (unlikely(tx_buf->type <= LIBETH_SQE_CTX))
+ return false;
- while (tx_buf->compl_tag == (int)compl_tag) {
- if (tx_buf->skb) {
- idpf_tx_splitq_clean_hdr(txq, tx_buf, cleaned, budget);
- } else if (dma_unmap_len(tx_buf, len)) {
- dma_unmap_page(txq->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
- }
-
- memset(tx_buf, 0, sizeof(struct idpf_tx_buf));
- tx_buf->compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+ while (idpf_tx_buf_compl_tag(tx_buf) == (int)compl_tag) {
+ libeth_tx_complete(tx_buf, &cp);
num_descs_cleaned++;
idpf_tx_clean_buf_ring_bump_ntc(txq, idx, tx_buf);
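
[Editor's note] idpf_tx_clean_buf_ring() now gates on the buffer type before walking, then releases consecutive slots for as long as the stored tag matches. A toy version of that walk; it assumes, as the driver does, that adjacent packets carry different tags, which is what terminates the loop:

#define RING_SZ 512U

struct ring_buf {
	int type;	/* 0 models LIBETH_SQE_EMPTY */
	int tag;
};

/* Release consecutive ring slots carrying `tag`, starting at `idx`,
 * and return how many descriptors were cleaned. */
static unsigned int clean_by_tag(struct ring_buf *ring, unsigned int idx,
				 int tag)
{
	unsigned int cleaned = 0;

	while (ring[idx].tag == tag) {
		ring[idx].type = 0;	/* stands in for libeth_tx_complete() */
		cleaned++;
		idx = (idx + 1) % RING_SZ;
	}
	return cleaned;
}
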
@@ -1965,7 +1901,7 @@ static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag,
*/
static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
struct idpf_splitq_tx_compl_desc *desc,
- struct idpf_cleaned_stats *cleaned,
+ struct libeth_sq_napi_stats *cleaned,
int budget)
{
u16 compl_tag;
@@ -2008,7 +1944,7 @@ static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget,
ntc -= complq->desc_count;
do {
- struct idpf_cleaned_stats cleaned_stats = { };
+ struct libeth_sq_napi_stats cleaned_stats = { };
struct idpf_tx_queue *tx_q;
int rel_tx_qid;
u16 hw_head;
@@ -2307,6 +2243,12 @@ unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
struct idpf_tx_buf *first, u16 idx)
{
+ struct libeth_sq_napi_stats ss = { };
+ struct libeth_cq_pp cp = {
+ .dev = txq->dev,
+ .ss = &ss,
+ };
+
u64_stats_update_begin(&txq->stats_sync);
u64_stats_inc(&txq->q_stats.dma_map_errs);
u64_stats_update_end(&txq->stats_sync);
@@ -2316,7 +2258,7 @@ void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
struct idpf_tx_buf *tx_buf;
tx_buf = &txq->tx_buf[idx];
- idpf_tx_buf_rel(txq, tx_buf);
+ libeth_tx_complete(tx_buf, &cp);
if (tx_buf == first)
break;
if (idx == 0)
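
[Editor's note] The DMA-error path unwinds backwards from the failing slot to the first buffer of the packet, now releasing each slot through the same libeth_tx_complete() path as normal cleaning. A compact model of that backward walk with wraparound; ring size and helper are illustrative:

#define RING_SZ 512U

struct ring_slot { int dummy; };	/* stand-in buffer slot */

static struct ring_slot ring[RING_SZ];

static void release(struct ring_slot *buf)	/* models libeth_tx_complete() */
{
	(void)buf;
}

/* Walk backwards from the slot that failed to map down to the
 * packet's first slot, wrapping below index 0. */
static void unwind(unsigned int first, unsigned int failed)
{
	unsigned int idx = failed;

	for (;;) {
		release(&ring[idx]);
		if (idx == first)
			break;
		if (idx == 0)
			idx = RING_SZ;	/* next decrement lands on RING_SZ - 1 */
		idx--;
	}
}
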
@@ -2405,7 +2347,8 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
if (dma_mapping_error(tx_q->dev, dma))
return idpf_tx_dma_map_error(tx_q, skb, first, i);
- tx_buf->compl_tag = params->compl_tag;
+ idpf_tx_buf_compl_tag(tx_buf) = params->compl_tag;
+ tx_buf->type = LIBETH_SQE_FRAG;
/* record length, and DMA address */
dma_unmap_len_set(tx_buf, len, size);
@@ -2479,8 +2422,7 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
* simply pass over these holes and finish cleaning the
* rest of the packet.
*/
- memset(&tx_q->tx_buf[i], 0, sizeof(struct idpf_tx_buf));
- tx_q->tx_buf[i].compl_tag = params->compl_tag;
+ tx_q->tx_buf[i].type = LIBETH_SQE_EMPTY;
/* Adjust the DMA offset and the remaining size of the
* fragment. On the first iteration of this loop,
@@ -2525,19 +2467,19 @@ static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
/* record SW timestamp if HW timestamp is not available */
skb_tx_timestamp(skb);
+ first->type = LIBETH_SQE_SKB;
+
/* write last descriptor with RS and EOP bits */
+ first->rs_idx = i;
td_cmd |= params->eop_cmd;
idpf_tx_splitq_build_desc(tx_desc, params, td_cmd, size);
i = idpf_tx_splitq_bump_ntu(tx_q, i);
- /* set next_to_watch value indicating a packet is present */
- first->next_to_watch = tx_desc;
-
tx_q->txq_grp->num_completions_pending++;
/* record bytecount for BQL */
nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
- netdev_tx_sent_queue(nq, first->bytecount);
+ netdev_tx_sent_queue(nq, first->bytes);
idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
}
@@ -2737,8 +2679,7 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
struct idpf_flex_tx_ctx_desc *desc;
int i = txq->next_to_use;
- memset(&txq->tx_buf[i], 0, sizeof(struct idpf_tx_buf));
- txq->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+ txq->tx_buf[i].type = LIBETH_SQE_CTX;
/* grab the next descriptor */
desc = &txq->flex_ctx[i];
@@ -2822,12 +2763,12 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
first->skb = skb;
if (tso) {
- first->gso_segs = tx_params.offload.tso_segs;
- first->bytecount = skb->len +
- ((first->gso_segs - 1) * tx_params.offload.tso_hdr_len);
+ first->packets = tx_params.offload.tso_segs;
+ first->bytes = skb->len +
+ ((first->packets - 1) * tx_params.offload.tso_hdr_len);
} else {
- first->gso_segs = 1;
- first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
+ first->packets = 1;
+ first->bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
}
if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
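
[Editor's note] The tso branch charges BQL and the completion stats with the on-wire total: skb->len already contains one copy of the headers, and every additional segment repeats them, hence the (packets - 1) * tso_hdr_len term. A worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned int skb_len = 65226;	/* payload plus one header copy */
	unsigned int tso_hdr_len = 66;	/* e.g. Ethernet + IPv4 + TCP */
	unsigned int segs = 45;		/* tso_segs for a 1448-byte MSS */
	unsigned int bytes = skb_len + (segs - 1) * tso_hdr_len;

	/* prints: packets=45 bytes=68130 */
	printf("packets=%u bytes=%u\n", segs, bytes);
	return 0;
}
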