author     Edward Cree <ecree.xilinx@gmail.com>      2022-07-28 19:57:45 +0100
committer  Jakub Kicinski <kuba@kernel.org>          2022-07-29 21:22:05 -0700
commit     9fe00c800ecd667acb7748cab7fcd3068c58498a (patch)
tree       7abdba8cee9dbe284334aa3c08c9051231532581 /drivers/net/ethernet/sfc/ef100_rep.c
parent     69bb5fa73d2b2d7fa3ccbf16e8b1f055fe2d26b1 (diff)
sfc: ef100 representor RX top half
Representor RX uses a NAPI context driven by a 'fake interrupt': when the parent PF receives a packet destined for the representor, it adds it to an SKB list (efv->rx_list), and schedules NAPI if the 'fake interrupt' is primed. The NAPI poll then pulls packets off this list and feeds them to the stack with netif_receive_skb_list(). This scheme allows us to decouple representor RX from the parent PF's RX fast-path.

This patch implements the 'top half', which builds an SKB, copies data into it from the RX buffer (which can then be released), adds it to the queue and fires the 'fake interrupt' if necessary.

Signed-off-by: Edward Cree <ecree.xilinx@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
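For context, the 'bottom half' paired with this top half is the representor's NAPI poll, efx_ef100_rep_poll() (declared in the hunks below and implemented by an earlier patch in the series). The following is a minimal sketch of how such a poll could drain the list written by this patch, assuming only the efv fields the patch itself uses (rx_list, rx_lock, read_index, write_index, napi); it is not a quote of the driver's actual implementation, which may differ in detail:

/* Sketch only: a NAPI poll that drains the representor's SKB list.
 * Field names match the top half added below; the real
 * efx_ef100_rep_poll() may differ.
 */
static int efx_ef100_rep_poll(struct napi_struct *napi, int weight)
{
	struct efx_rep *efv = container_of(napi, struct efx_rep, napi);
	struct list_head head;
	struct sk_buff *skb;
	int spent = 0;

	INIT_LIST_HEAD(&head);
	/* Move up to 'weight' queued SKBs onto a private list under the lock */
	spin_lock_bh(&efv->rx_lock);
	while (spent < weight && !list_empty(&efv->rx_list)) {
		skb = list_first_entry(&efv->rx_list, struct sk_buff, list);
		list_move_tail(&skb->list, &head);
		efv->read_index++;	/* lets the top half re-prime its 'fake interrupt' */
		spent++;
	}
	spin_unlock_bh(&efv->rx_lock);

	/* Hand the batch to the stack outside the lock */
	netif_receive_skb_list(&head);

	if (spent < weight && napi_complete_done(napi, spent)) {
		/* Packets may have been queued after we drained the list but
		 * before NAPI completion; if so, reschedule ourselves, since
		 * the producer saw the 'fake interrupt' as not primed.
		 */
		spin_lock_bh(&efv->rx_lock);
		if (efv->read_index != efv->write_index)
			napi_schedule(&efv->napi);
		spin_unlock_bh(&efv->rx_lock);
	}
	return spent;
}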
Diffstat (limited to 'drivers/net/ethernet/sfc/ef100_rep.c')
-rw-r--r--  drivers/net/ethernet/sfc/ef100_rep.c | 55
1 file changed, 55 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/sfc/ef100_rep.c b/drivers/net/ethernet/sfc/ef100_rep.c
index fe45ae963391..e6c6e9e764b2 100644
--- a/drivers/net/ethernet/sfc/ef100_rep.c
+++ b/drivers/net/ethernet/sfc/ef100_rep.c
@@ -13,9 +13,12 @@
 #include "ef100_netdev.h"
 #include "ef100_nic.h"
 #include "mae.h"
+#include "rx_common.h"
 
 #define EFX_EF100_REP_DRIVER	"efx_ef100_rep"
 
+#define EFX_REP_DEFAULT_PSEUDO_RING_SIZE	64
+
 static int efx_ef100_rep_poll(struct napi_struct *napi, int weight);
 
 static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv,
@@ -198,6 +201,7 @@ static int efx_ef100_configure_rep(struct efx_rep *efv)
 	u32 selector;
 	int rc;
 
+	efv->rx_pring_size = EFX_REP_DEFAULT_PSEUDO_RING_SIZE;
 	/* Construct mport selector for corresponding VF */
 	efx_mae_mport_vf(efx, efv->idx, &selector);
 	/* Look up actual mport ID */
@@ -320,3 +324,54 @@ static int efx_ef100_rep_poll(struct napi_struct *napi, int weight)
 	}
 	return spent;
 }
+
+void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf)
+{
+	u8 *eh = efx_rx_buf_va(rx_buf);
+	struct sk_buff *skb;
+	bool primed;
+
+	/* Don't allow too many queued SKBs to build up, as they consume
+	 * GFP_ATOMIC memory.  If we overrun, just start dropping.
+	 */
+	if (efv->write_index - READ_ONCE(efv->read_index) > efv->rx_pring_size) {
+		atomic64_inc(&efv->stats.rx_dropped);
+		if (net_ratelimit())
+			netif_dbg(efv->parent, rx_err, efv->net_dev,
+				  "nodesc-dropped packet of length %u\n",
+				  rx_buf->len);
+		return;
+	}
+
+	skb = netdev_alloc_skb(efv->net_dev, rx_buf->len);
+	if (!skb) {
+		atomic64_inc(&efv->stats.rx_dropped);
+		if (net_ratelimit())
+			netif_dbg(efv->parent, rx_err, efv->net_dev,
+				  "noskb-dropped packet of length %u\n",
+				  rx_buf->len);
+		return;
+	}
+	memcpy(skb->data, eh, rx_buf->len);
+	__skb_put(skb, rx_buf->len);
+
+	skb_record_rx_queue(skb, 0); /* rep is single-queue */
+
+	/* Move past the ethernet header */
+	skb->protocol = eth_type_trans(skb, efv->net_dev);
+
+	skb_checksum_none_assert(skb);
+
+	atomic64_inc(&efv->stats.rx_packets);
+	atomic64_add(rx_buf->len, &efv->stats.rx_bytes);
+
+	/* Add it to the rx list */
+	spin_lock_bh(&efv->rx_lock);
+	primed = efv->read_index == efv->write_index;
+	list_add_tail(&skb->list, &efv->rx_list);
+	efv->write_index++;
+	spin_unlock_bh(&efv->rx_lock);
+	/* Trigger rx work */
+	if (primed)
+		napi_schedule(&efv->napi);
+}
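Usage note: the call into efx_ef100_rep_rx_packet() comes from the parent PF's RX path, which is not part of this patch. A rough sketch of such a call site follows; the helper name efx_ef100_find_rep_by_mport() and the ingress m-port argument are assumptions for illustration only, not the driver's actual code:

/* Sketch (assumption): dispatching a received packet to a representor.
 * efx_ef100_find_rep_by_mport() is an illustrative lookup helper; the
 * real call site in the PF RX path may differ.
 */
static void ef100_rx_dispatch_to_rep(struct efx_nic *efx, u32 ing_mport,
				     struct efx_rx_buffer *rx_buf)
{
	struct efx_rep *efv;

	efv = efx_ef100_find_rep_by_mport(efx, ing_mport);
	if (!efv)
		return;		/* no representor for this m-port */
	/* The top half copies the payload into a fresh SKB, so the caller
	 * can release or recycle rx_buf as soon as this returns.
	 */
	efx_ef100_rep_rx_packet(efv, rx_buf);
}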