author    Magnus Karlsson <magnus.karlsson@intel.com>    2020-08-28 10:26:16 +0200
committer Daniel Borkmann <daniel@iogearbox.net>    2020-08-31 21:15:04 +0200
commit    c4655761d3cf62bf5f86650e79349c1bfa5c6285
tree      b45ecd0449986910dcac7fc869893d60a627480a /net/xdp
parent    1742b3d528690ae7773cf7bf2f01a90ee1de2fe0
xsk: i40e: ice: ixgbe: mlx5: Rename xsk zero-copy driver interfaces
Rename the AF_XDP zero-copy driver interface functions to better reflect what they do after the replacement of umems with buffer pools in the previous commit. Mostly this means dropping the umem name from the function names and having them take a buffer pool pointer instead of a umem. The various ring functions have also been renamed in the process so that they follow the same naming convention as the internal functions in xsk_queue.h, both for clarity and for consistency.

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Link: https://lore.kernel.org/bpf/1598603189-32145-3-git-send-email-magnus.karlsson@intel.com
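
To illustrate the rename from a driver's point of view, below is a minimal sketch of a Tx cleanup path using the new pool-based interface. The xsk_* calls are the ones renamed by this commit; the driver-side type my_tx_ring and the hardware handling are hypothetical and not taken from any in-tree driver. The old umem-based name each call replaces is noted in the comments.

/*
 * Sketch only (not part of this patch): a hypothetical driver's Tx
 * cleanup routine after the rename to the pool-based interface.
 */
#include <net/xdp_sock_drv.h>

struct my_tx_ring {
        struct xsk_buff_pool *pool;     /* was: struct xdp_umem *umem */
};

static void my_clean_xsk_tx(struct my_tx_ring *ring, u32 completed)
{
        struct xdp_desc desc;

        /* Report finished Tx descriptors to the completion ring
         * (was: xsk_umem_complete_tx(umem, completed)).
         */
        xsk_tx_completed(ring->pool, completed);

        /* Fetch new descriptors to send
         * (was: xsk_umem_consume_tx(umem, &desc)).
         */
        while (xsk_tx_peek_desc(ring->pool, &desc)) {
                void *data = xsk_buff_raw_get_data(ring->pool, desc.addr);

                /* ... post data/desc.len to the hardware Tx ring here ... */
        }

        /* Flush consumed Tx ring state and wake waiters
         * (was: xsk_umem_consume_tx_done(umem)).
         */
        xsk_tx_release(ring->pool);

        /* Re-arm the wakeup flag if the application uses it
         * (was: xsk_umem_uses_need_wakeup()/xsk_set_tx_need_wakeup(umem)).
         */
        if (xsk_uses_need_wakeup(ring->pool))
                xsk_set_tx_need_wakeup(ring->pool);
}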
Diffstat (limited to 'net/xdp')
-rw-r--r--  net/xdp/xdp_umem.c  24
-rw-r--r--  net/xdp/xsk.c       45
2 files changed, 39 insertions, 30 deletions
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index 0b5f3b04216f..adde4d576b38 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -51,9 +51,9 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
* not know if the device has more tx queues than rx, or the opposite.
* This might also change during run time.
*/
-static int xdp_reg_xsk_pool_at_qid(struct net_device *dev,
- struct xsk_buff_pool *pool,
- u16 queue_id)
+static int xsk_reg_pool_at_qid(struct net_device *dev,
+ struct xsk_buff_pool *pool,
+ u16 queue_id)
{
if (queue_id >= max_t(unsigned int,
dev->real_num_rx_queues,
@@ -68,8 +68,8 @@ static int xdp_reg_xsk_pool_at_qid(struct net_device *dev,
return 0;
}
-struct xsk_buff_pool *xdp_get_xsk_pool_from_qid(struct net_device *dev,
- u16 queue_id)
+struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
+ u16 queue_id)
{
if (queue_id < dev->real_num_rx_queues)
return dev->_rx[queue_id].pool;
@@ -78,9 +78,9 @@ struct xsk_buff_pool *xdp_get_xsk_pool_from_qid(struct net_device *dev,
return NULL;
}
-EXPORT_SYMBOL(xdp_get_xsk_pool_from_qid);
+EXPORT_SYMBOL(xsk_get_pool_from_qid);
-static void xdp_clear_xsk_pool_at_qid(struct net_device *dev, u16 queue_id)
+static void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
if (queue_id < dev->real_num_rx_queues)
dev->_rx[queue_id].pool = NULL;
@@ -103,10 +103,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
if (force_zc && force_copy)
return -EINVAL;
- if (xdp_get_xsk_pool_from_qid(dev, queue_id))
+ if (xsk_get_pool_from_qid(dev, queue_id))
return -EBUSY;
- err = xdp_reg_xsk_pool_at_qid(dev, umem->pool, queue_id);
+ err = xsk_reg_pool_at_qid(dev, umem->pool, queue_id);
if (err)
return err;
@@ -119,7 +119,7 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
* Also for supporting drivers that do not implement this
* feature. They will always have to call sendto().
*/
- xsk_set_tx_need_wakeup(umem);
+ xsk_set_tx_need_wakeup(umem->pool);
}
dev_hold(dev);
@@ -148,7 +148,7 @@ err_unreg_umem:
if (!force_zc)
err = 0; /* fallback to copy mode */
if (err)
- xdp_clear_xsk_pool_at_qid(dev, queue_id);
+ xsk_clear_pool_at_qid(dev, queue_id);
return err;
}
@@ -173,7 +173,7 @@ void xdp_umem_clear_dev(struct xdp_umem *umem)
WARN(1, "failed to disable umem!\n");
}
- xdp_clear_xsk_pool_at_qid(umem->dev, umem->queue_id);
+ xsk_clear_pool_at_qid(umem->dev, umem->queue_id);
dev_put(umem->dev);
umem->dev = NULL;
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index c3231620d210..befc9a4331e1 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -39,8 +39,10 @@ bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
READ_ONCE(xs->umem->fq);
}
-void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
+void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
+ struct xdp_umem *umem = pool->umem;
+
if (umem->need_wakeup & XDP_WAKEUP_RX)
return;
@@ -49,8 +51,9 @@ void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);
-void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
+void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
+ struct xdp_umem *umem = pool->umem;
struct xdp_sock *xs;
if (umem->need_wakeup & XDP_WAKEUP_TX)
@@ -66,8 +69,10 @@ void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);
-void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
+void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
+ struct xdp_umem *umem = pool->umem;
+
if (!(umem->need_wakeup & XDP_WAKEUP_RX))
return;
@@ -76,8 +81,9 @@ void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);
-void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
+void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
+ struct xdp_umem *umem = pool->umem;
struct xdp_sock *xs;
if (!(umem->need_wakeup & XDP_WAKEUP_TX))
@@ -93,11 +99,11 @@ void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);
-bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
+bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
- return umem->flags & XDP_UMEM_USES_NEED_WAKEUP;
+ return pool->umem->flags & XDP_UMEM_USES_NEED_WAKEUP;
}
-EXPORT_SYMBOL(xsk_umem_uses_need_wakeup);
+EXPORT_SYMBOL(xsk_uses_need_wakeup);
void xp_release(struct xdp_buff_xsk *xskb)
{
@@ -155,12 +161,12 @@ static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
struct xdp_buff *xsk_xdp;
int err;
- if (len > xsk_umem_get_rx_frame_size(xs->umem)) {
+ if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
xs->rx_dropped++;
return -ENOSPC;
}
- xsk_xdp = xsk_buff_alloc(xs->umem);
+ xsk_xdp = xsk_buff_alloc(xs->pool);
if (!xsk_xdp) {
xs->rx_dropped++;
return -ENOSPC;
@@ -249,27 +255,28 @@ void __xsk_map_flush(void)
}
}
-void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
+void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
- xskq_prod_submit_n(umem->cq, nb_entries);
+ xskq_prod_submit_n(pool->umem->cq, nb_entries);
}
-EXPORT_SYMBOL(xsk_umem_complete_tx);
+EXPORT_SYMBOL(xsk_tx_completed);
-void xsk_umem_consume_tx_done(struct xdp_umem *umem)
+void xsk_tx_release(struct xsk_buff_pool *pool)
{
struct xdp_sock *xs;
rcu_read_lock();
- list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
+ list_for_each_entry_rcu(xs, &pool->umem->xsk_tx_list, list) {
__xskq_cons_release(xs->tx);
xs->sk.sk_write_space(&xs->sk);
}
rcu_read_unlock();
}
-EXPORT_SYMBOL(xsk_umem_consume_tx_done);
+EXPORT_SYMBOL(xsk_tx_release);
-bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
+bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
+ struct xdp_umem *umem = pool->umem;
struct xdp_sock *xs;
rcu_read_lock();
@@ -296,7 +303,7 @@ out:
rcu_read_unlock();
return false;
}
-EXPORT_SYMBOL(xsk_umem_consume_tx);
+EXPORT_SYMBOL(xsk_tx_peek_desc);
static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
@@ -359,7 +366,7 @@ static int xsk_generic_xmit(struct sock *sk)
skb_put(skb, len);
addr = desc.addr;
- buffer = xsk_buff_raw_get_data(xs->umem, addr);
+ buffer = xsk_buff_raw_get_data(xs->pool, addr);
err = skb_store_bits(skb, 0, buffer, len);
/* This is the backpressure mechanism for the Tx path.
* Reserve space in the completion queue and only proceed
@@ -762,6 +769,8 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
return PTR_ERR(umem);
}
+ xs->pool = umem->pool;
+
/* Make sure umem is ready before it can be seen by others */
smp_wmb();
WRITE_ONCE(xs->umem, umem);