author	Magnus Karlsson <magnus.karlsson@intel.com>	2019-12-19 13:39:24 +0100
committer	Alexei Starovoitov <ast@kernel.org>	2019-12-20 16:00:09 -0800
commit	4b638f13bab4dbfe8569c84e374e8201a427115c (patch)
tree	49b18b735e3810ca3dcd0f6375eb9f7dc4740982 /net/xdp
parent	59e35e552529b858f35b30bc5a803ea532ca17f1 (diff)
xsk: Eliminate the RX batch size
In the xsk consumer ring code there is a variable called RX_BATCH_SIZE that dictates the minimum number of entries that we try to grab from the fill and Tx rings. In fact, the code always tries to grab the maximum number of entries from these rings. The only thing this variable does is to throw an error if there are fewer than 16 entries (its defined value) on the ring. There is no reason to do this, and it just leads to weird behavior from user space's point of view. So eliminate this variable. With this change, we will be able to simplify the xskq_nb_free and xskq_nb_avail code in the next commit.

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/1576759171-28550-6-git-send-email-magnus.karlsson@intel.com
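To make the user-visible effect concrete, below is a small, hypothetical user-space style sketch; it is not the kernel's xsk_queue code, and the helpers grab_with_min() and grab_all() are invented purely for illustration. It paraphrases the description above: with a hard 16-entry minimum, a ring holding fewer entries looks as if nothing is available at all, whereas without the minimum whatever is on the ring is consumed right away.

	/* Hypothetical illustration only; not the kernel's xsk_queue API. */
	#include <stdio.h>

	#define RX_BATCH_SIZE 16

	/* Old behavior as described above: report nothing unless at least
	 * 'min' entries are sitting on the ring.
	 */
	static unsigned int grab_with_min(unsigned int avail, unsigned int min)
	{
		return (avail < min) ? 0 : avail;
	}

	/* New behavior: grab whatever is available, even a single entry. */
	static unsigned int grab_all(unsigned int avail)
	{
		return avail;
	}

	int main(void)
	{
		unsigned int avail = 5;	/* user space posted only 5 entries */

		printf("with RX_BATCH_SIZE: %u entries consumed\n",
		       grab_with_min(avail, RX_BATCH_SIZE));	/* prints 0 */
		printf("without it:         %u entries consumed\n",
		       grab_all(avail));			/* prints 5 */
		return 0;
	}

In the patch itself, the minimum passed to xskq_nb_avail() simply becomes 1, as the two hunks below show.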
Diffstat (limited to 'net/xdp')
-rw-r--r--	net/xdp/xsk_queue.h	6
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index 1b9a350f2e66..6ca5fed87852 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -10,8 +10,6 @@
 #include <linux/if_xdp.h>
 #include <net/xdp_sock.h>
 
-#define RX_BATCH_SIZE 16
-
 struct xdp_ring {
 	u32 producer ____cacheline_aligned_in_smp;
 	u32 consumer ____cacheline_aligned_in_smp;
@@ -202,7 +200,7 @@ static inline u64 *xskq_peek_addr(struct xsk_queue *q, u64 *addr,
 	if (q->cons_tail == q->cons_head) {
 		smp_mb(); /* D, matches A */
 		WRITE_ONCE(q->ring->consumer, q->cons_tail);
-		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);
+		q->cons_head = q->cons_tail + xskq_nb_avail(q, 1);
 
 		/* Order consumer and data */
 		smp_rmb();
@@ -320,7 +318,7 @@ static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q,
 	if (q->cons_tail == q->cons_head) {
 		smp_mb(); /* D, matches A */
 		WRITE_ONCE(q->ring->consumer, q->cons_tail);
-		q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE);
+		q->cons_head = q->cons_tail + xskq_nb_avail(q, 1);
 
 		/* Order consumer and data */
 		smp_rmb(); /* C, matches B */