drivers/net/ethernet/netronome/nfp/nfp_net_xsk.c

// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc */
/* Copyright (C) 2021 Corigine, Inc */
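
/* AF_XDP (XSK) zero-copy support for the NFP driver: RX buffer stashing
 * and recycling, freelist refill, buffer pool DMA (un)mapping, and the
 * wakeup hook.
 */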

#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <net/xdp_sock_drv.h>
#include <trace/events/xdp.h>

#include "nfp_app.h"
#include "nfp_net.h"
#include "nfp_net_dp.h"
#include "nfp_net_xsk.h"

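/* Record a freshly allocated xdp_buff and its DMA address (offset by the
 * pool's headroom) in the ring's shadow array, and clear the descriptor
 * fields that the datapath will check.
 */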
static void
nfp_net_xsk_rx_bufs_stash(struct nfp_net_rx_ring *rx_ring, unsigned int idx,
			  struct xdp_buff *xdp)
{
	unsigned int headroom;

	headroom = xsk_pool_get_headroom(rx_ring->r_vec->xsk_pool);

	rx_ring->rxds[idx].fld.reserved = 0;
	rx_ring->rxds[idx].fld.meta_len_dd = 0;

	rx_ring->xsk_rxbufs[idx].xdp = xdp;
	rx_ring->xsk_rxbufs[idx].dma_addr =
		xsk_buff_xdp_get_frame_dma(xdp) + headroom;
}

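/* Drop the ring's reference to a stashed buffer without freeing it, for
 * when ownership of the xdp_buff has moved elsewhere (e.g. it was passed
 * up the stack).
 */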
void nfp_net_xsk_rx_unstash(struct nfp_net_xsk_rx_buf *rxbuf)
{
	rxbuf->dma_addr = 0;
	rxbuf->xdp = NULL;
}

void nfp_net_xsk_rx_free(struct nfp_net_xsk_rx_buf *rxbuf)
{
	if (rxbuf->xdp)
		xsk_buff_free(rxbuf->xdp);

	nfp_net_xsk_rx_unstash(rxbuf);
}

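/* Return all stashed buffers to the XSK pool.  Only cnt - 1 entries are
 * walked: one freelist slot is always left unused so that a full ring can
 * be distinguished from an empty one.
 */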
void nfp_net_xsk_rx_bufs_free(struct nfp_net_rx_ring *rx_ring)
{
	unsigned int i;

	if (!rx_ring->cnt)
		return;

	for (i = 0; i < rx_ring->cnt - 1; i++)
		nfp_net_xsk_rx_free(&rx_ring->xsk_rxbufs[i]);
}

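/* Top up the freelist from the XSK pool: allocate buffers until the ring
 * is full or the pool runs dry, write one freelist descriptor per buffer,
 * then publish them all to the device with a single write pointer update.
 */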
void nfp_net_xsk_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring)
{
	struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
	struct xsk_buff_pool *pool = r_vec->xsk_pool;
	unsigned int wr_idx, wr_ptr_add = 0;
	struct xdp_buff *xdp;

	while (nfp_net_rx_space(rx_ring)) {
		wr_idx = D_IDX(rx_ring, rx_ring->wr_p);

		xdp = xsk_buff_alloc(pool);
		if (!xdp)
			break;

		nfp_net_xsk_rx_bufs_stash(rx_ring, wr_idx, xdp);

		/* The DMA address is expanded to 48-bit width in the freelist
		 * for the NFP3800, so the *_48b macro is used accordingly.
		 * It is also fine to fill in a 40-bit address, since the top
		 * 8 bits get set to 0.
		 */
		nfp_desc_set_dma_addr_48b(&rx_ring->rxds[wr_idx].fld,
					  rx_ring->xsk_rxbufs[wr_idx].dma_addr);

		rx_ring->wr_p++;
		wr_ptr_add++;
	}

	/* Ensure all records are visible before incrementing write counter. */
	wmb();
	nfp_qcp_wr_ptr_add(rx_ring->qcp_fl, wr_ptr_add);
}

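/* Account a dropped frame in the ring vector's stats and return its buffer
 * to the XSK pool.
 */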
void nfp_net_xsk_rx_drop(struct nfp_net_r_vector *r_vec,
			 struct nfp_net_xsk_rx_buf *xrxbuf)
{
	u64_stats_update_begin(&r_vec->rx_sync);
	r_vec->rx_drops++;
	u64_stats_update_end(&r_vec->rx_sync);

	nfp_net_xsk_rx_free(xrxbuf);
}

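/* Thin wrappers around the XSK pool DMA mapping helpers.  No special DMA
 * attributes are needed, hence attrs == 0; @dev is unused on the unmap
 * side and kept only for symmetry.
 */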
static void nfp_net_xsk_pool_unmap(struct device *dev,
				   struct xsk_buff_pool *pool)
{
	xsk_pool_dma_unmap(pool, 0);
}

static int nfp_net_xsk_pool_map(struct device *dev, struct xsk_buff_pool *pool)
{
	return xsk_pool_dma_map(pool, dev, 0);
}

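/* Install an XSK buffer pool on @queue_id, or remove the current one when
 * @pool is NULL.  The new pool is DMA-mapped up front, swapped in via a
 * full ring reconfig, and only then is the previous pool unmapped, so the
 * datapath never observes an unmapped pool.
 */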
int nfp_net_xsk_setup_pool(struct net_device *netdev,
			   struct xsk_buff_pool *pool, u16 queue_id)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct xsk_buff_pool *prev_pool;
	struct nfp_net_dp *dp;
	int err;

	/* NFDK doesn't implement xsk yet. */
	if (nn->dp.ops->version == NFP_NFD_VER_NFDK)
		return -EOPNOTSUPP;

	/* Reject on old FWs so we can drop some checks on the datapath. */
	if (nn->dp.rx_offset != NFP_NET_CFG_RX_OFFSET_DYNAMIC)
		return -EOPNOTSUPP;
	if (!nn->dp.chained_metadata_format)
		return -EOPNOTSUPP;

	/* Install */
	if (pool) {
		err = nfp_net_xsk_pool_map(nn->dp.dev, pool);
		if (err)
			return err;
	}

	/* Reconfig/swap */
	dp = nfp_net_clone_dp(nn);
	if (!dp) {
		err = -ENOMEM;
		goto err_unmap;
	}

	prev_pool = dp->xsk_pools[queue_id];
	dp->xsk_pools[queue_id] = pool;

	err = nfp_net_ring_reconfig(nn, dp, NULL);
	if (err)
		goto err_unmap;

	/* Uninstall */
	if (prev_pool)
		nfp_net_xsk_pool_unmap(nn->dp.dev, prev_pool);

	return 0;
err_unmap:
	if (pool)
		nfp_net_xsk_pool_unmap(nn->dp.dev, pool);

	return err;
}

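/* The driver's .ndo_xsk_wakeup hook: kick the queue's NAPI so the poll
 * loop picks up pending AF_XDP work.
 */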
int nfp_net_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags)
{
	struct nfp_net *nn = netdev_priv(netdev);

	/* queue_id comes from a zero-copy socket, installed with
	 * XDP_SETUP_XSK_POOL, so it must be within our vector range.
	 * Moreover, our napi structs are statically allocated, so we can
	 * always kick them without worrying whether a reconfig is in
	 * progress or the interface is down.
	 */
	napi_schedule(&nn->r_vecs[queue_id].napi);

	return 0;
}
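
/* For illustration only (not part of this file): these entry points are
 * wired up elsewhere in the driver, roughly along the lines below.  The
 * member names match the in-tree driver, but treat the sketch as an
 * assumption, not a verbatim excerpt:
 *
 *	static const struct net_device_ops nfp_net_netdev_ops = {
 *		...
 *		.ndo_bpf	= nfp_net_xdp,	// XDP_SETUP_XSK_POOL is
 *						// dispatched to
 *						// nfp_net_xsk_setup_pool()
 *		.ndo_xsk_wakeup	= nfp_net_xsk_wakeup,
 *		...
 *	};
 */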